sgptools 1.0.5__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sgptools/models/bo.py CHANGED
@@ -29,67 +29,90 @@ class BayesianOpt:
          X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
          noise_variance (float): data variance
          kernel (gpflow.kernels.Kernel): gpflow kernel function
+         transform (Transform): Transform object
      """
-     def __init__(self, X_train, noise_variance, kernel):
+     def __init__(self, X_train, noise_variance, kernel,
+                  transform=None):
          self.X_train = X_train
          self.noise_variance = noise_variance
          self.kernel = kernel
          self.num_dim = X_train.shape[-1]
-
+         self.transform = transform
+
          # use the boundaries of the region as the search space
          self.pbounds_dim = []
          for i in range(self.num_dim):
              self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))
-
+
      def objective(self, **kwargs):
-         """Computes the objective function (mutual information) for the sensor placement problem
+         """Objective function (GP-based Mutual Information)
+
+         Args:
+             x<i> (ndarray): (1, d); Current solution sensor placement location i
          """
+         # MI does not depend on waypoint order (reshape to -1, num_dim)
          X = []
          for i in range(len(kwargs)):
              X.append(kwargs['x{}'.format(i)])
          X = np.array(X).reshape(-1, self.num_dim)
-         return -get_mi(X, self.noise_variance, self.kernel, self.X_train)
-
+         if self.transform is not None:
+             X = self.transform.expand(X)
+             constraints_loss = self.transform.constraints(X)
+
+         try:
+             mi = get_mi(X, self.X_train, self.noise_variance, self.kernel)
+             mi += constraints_loss
+             mi = mi.numpy()
+         except:
+             mi = -1e4 # if the Cholesky decomposition fails
+         return mi
+
      def optimize(self,
                   num_sensors=10,
                   max_steps=100,
                   X_init=None,
-                  init_points=10):
+                  init_points=10,
+                  verbose=0,
+                  seed=1234):
          """Optimizes the sensor placements using Bayesian Optimization without any constraints

          Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             X_init (ndarray): (m, d); Initial inducing points
-             init_points (int): How many steps of random exploration you want to perform.
+             num_sensors (int): Number of sensor locations to optimize.
+             max_steps (int): Maximum number of optimization steps.
+             X_init (ndarray): (m, d); Initial inducing points.
+             init_points (int): Number of random solutions used for initial exploration.
                                 Random exploration can help by diversifying the exploration space.
+             verbose (int): The level of verbosity.
+             seed (int): The algorithm will use it to seed the random number generator, ensuring replicability.

          Returns:
              Xu (ndarray): (m, d); Solution sensor placement locations
          """
          if X_init is None:
              X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+         else:
+             num_sensors = len(X_init.reshape(-1, self.num_dim))
          X_init = X_init.reshape(-1)

          pbounds = {}
          for i in range(self.num_dim*num_sensors):
              pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]

-         optimizer = BayesianOptimization(
-             f=self.objective,
-             pbounds=pbounds,
-             verbose=0,
-             random_state=1,
-             allow_duplicate_points=True
-         )
-
-         optimizer.maximize(
-             init_points=init_points,
-             n_iter=max_steps,
-         )
+         optimizer = BayesianOptimization(f=self.objective,
+                                          pbounds=pbounds,
+                                          verbose=verbose,
+                                          random_state=seed,
+                                          allow_duplicate_points=True)
+         optimizer.maximize(init_points=init_points,
+                            n_iter=max_steps)

          sol = []
          for i in range(self.num_dim*num_sensors):
              sol.append(optimizer.max['params']['x{}'.format(i)])
-         return np.array(sol).reshape(-1, self.num_dim)
-
+         sol = np.array(sol).reshape(-1, self.num_dim)
+         if self.transform is not None:
+             sol = self.transform.expand(sol,
+                                         expand_sensor_model=False)
+         if not isinstance(sol, np.ndarray):
+             sol = sol.numpy()
+         return sol.reshape(-1, self.num_dim)
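
For context, a minimal usage sketch of the updated `BayesianOpt` API with the new `verbose` and `seed` arguments; the dataset, RBF kernel, and noise variance below are illustrative assumptions, not values shipped with this release:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.models.bo import BayesianOpt

# Candidate/monitoring locations used to approximate the environment (assumed data)
X_train = np.random.uniform(0, 10, size=(500, 2))

bo = BayesianOpt(X_train, noise_variance=0.1, kernel=RBF())
placements = bo.optimize(num_sensors=5,
                         max_steps=50,
                         verbose=0,   # new in 1.1.0
                         seed=1234)   # new in 1.1.0
print(placements.shape)  # (5, 2)
```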
sgptools/models/cma_es.py CHANGED
@@ -57,48 +57,24 @@ class CMA_ES:
          """
          self.noise_variance = noise_variance
          self.kernel = kernel
-
-     def constraint(self, X):
-         """Constraint function for the optimization problem (constraint to limit the boundary of the region)
-         Does not work well with CMA-ES as it is a step function and is not continuous
-
-         Args:
-             X (ndarray): (n, d); Current sensor placement locations
-         """
-         X = np.array(X).reshape(-1, self.num_dim)
-         lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]
-         lagrangian = np.logical_not(lagrangian).astype(float)
-         return lagrangian
-
-     def distance_constraint(self, X):
-         """Constraint function for the optimization problem (constraint to limit the total travel distance)
-         Does not work well with CMA-ES as it is a step function and is not continuous
-
-         Args:
-             X (ndarray): (n, d); Current sensor placement locations
-         """
-         X = np.array(X).reshape(self.num_robots, -1, self.num_dim)
-         dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)
-         lagrangian = dists - self.distance_budget
-         lagrangian_mask = np.logical_not(lagrangian <= 0)
-         lagrangian[lagrangian_mask] = 0
-         lagrangian = np.sum(lagrangian)
-         return lagrangian

      def objective(self, X):
          """Objective function (GP-based Mutual Information)

          Args:
-             X (ndarray): (n, d); Initial sensor placement locations
+             X (ndarray): (n, d); Current solution sensor placement locations
          """
          # MI does not depend on waypoint order (reshape to -1, num_dim)
          X = np.array(X).reshape(-1, self.num_dim)
+         constraints_loss = 0.0
          if self.transform is not None:
-             X = self.transform.expand(X,
-                                       expand_sensor_model=False).numpy()
+             X = self.transform.expand(X)
+             constraints_loss = self.transform.constraints(X)

          try:
-             mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+             mi = -get_mi(X, self.X_train, self.noise_variance, self.kernel)
+             mi -= constraints_loss
+             mi = mi.numpy()
          except:
              mi = 0.0 # if the Cholesky decomposition fails
          return mi
@@ -106,16 +82,20 @@ class CMA_ES:
      def optimize(self,
                   num_sensors=10,
                   max_steps=5000,
-                  tol=1e-11,
-                  X_init=None):
-         """Optimizes the SP objective function using CMA-ES without any constraints
+                  tol=1e-6,
+                  X_init=None,
+                  verbose=0,
+                  seed=1234):
+         """Optimizes the sensor placements using CMA-ES without any constraints

          Args:
              num_sensors (int): Number of sensor locations to optimize
              max_steps (int): Maximum number of optimization steps
              tol (float): Convergence tolerance to decide when to stop optimization
              X_init (ndarray): (m, d); Initial inducing points
-
+             verbose (int): The level of verbosity.
+             seed (int): The algorithm will use it to seed the random number generator, ensuring replicability.
+
          Returns:
              Xu (ndarray): (m, d); Solution sensor placement locations
          """
@@ -127,63 +107,15 @@ class CMA_ES:

          xopt, _ = cma.fmin2(self.objective, X_init, sigma0,
                              options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
+                                      'verb_disp': verbose,
                                       'tolfun': tol,
-                                      'seed': 1234},
+                                      'seed': seed},
                              restarts=5)

          xopt = np.array(xopt).reshape(-1, self.num_dim)
          if self.transform is not None:
              xopt = self.transform.expand(xopt,
-                                          expand_sensor_model=False).numpy()
-
-         return xopt.reshape(-1, self.num_dim)
-
-     def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-         """Optimizes the SP objective function using CMA-ES with a distance budget constraint
-
-         Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             tol (float): Convergence tolerance to decide when to stop optimization
-
-         Returns:
-             Xu (ndarray): (m, d); Solution sensor placement locations
-         """
-         sigma0 = 1.0
-         idx = np.random.randint(len(self.X_train), size=num_sensors)
-         x_init = self.X_train[idx].reshape(-1)
-         cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)
-         xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                             options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
-                                      'tolfun': tol,
-                                      'seed': 1234},
-                             callback=cfun.update,
-                             restarts=5)
-         return xopt.reshape(-1, self.num_dim)
-
-     def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-         """Optimizes the SP objective function using CMA-ES with the constraints
-         to ensure that the sensors are placed within the boundaries of the region
-
-         Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             tol (float): Convergence tolerance to decide when to stop optimization
-
-         Returns:
-             Xu (ndarray): (m, d); Solution sensor placement locations
-         """
-         sigma0 = 1.0
-         idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)
-         x_init = self.X_train[idx].reshape(-1)
-         cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)
-         xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                             options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
-                                      'tolfun': tol,
-                                      'seed': 1234},
-                             callback=cfun.update,
-                             restarts=5)
-         return xopt.reshape(-1, self.num_dim)
+                                          expand_sensor_model=False)
+         if not isinstance(xopt, np.ndarray):
+             xopt = xopt.numpy()
+         return xopt.reshape(-1, self.num_dim)
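
A corresponding sketch for the CMA-ES model, which now folds constraint penalties into the objective via `Transform.constraints` instead of the removed `doptimize`/`coptimize` methods. The constructor arguments are an assumption based on the attributes used in the visible hunks (`self.X_train`, `self.noise_variance`, `self.kernel`); the full `__init__` signature is not part of this diff, and the data and kernel are illustrative:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.models.cma_es import CMA_ES

X_train = np.random.uniform(0, 10, size=(500, 2))  # assumed monitoring locations

# Assumed constructor order: (X_train, noise_variance, kernel, ...)
cma = CMA_ES(X_train, noise_variance=0.1, kernel=RBF())
placements = cma.optimize(num_sensors=5,
                          max_steps=2000,
                          tol=1e-6,    # new default in 1.1.0
                          verbose=0,   # new in 1.1.0
                          seed=1234)   # new in 1.1.0
```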
sgptools/models/core/transformations.py CHANGED
@@ -15,17 +15,9 @@
  """Provides transforms to model complex sensor field of views and handle informative path planning
  """

- try:
-     from tensorflow_graphics.math.interpolation import bspline
- except:
-     pass
-
  import tensorflow as tf
  import numpy as np

- from scipy.optimize import linear_sum_assignment
- from sklearn.metrics import pairwise_distances
-

  class Transform:
      """Base class for transformations of the inducing points, including expansion and aggregation transforms.
@@ -98,84 +90,6 @@ class Transform:
          """
          return 0.

-     def distance(self, Xu):
-         """Computes the distance incured by sequentially visiting the inducing points
-
-         Args:
-             Xu (ndarray): Inducing points from which to compute the path length
-
-         Returns:
-             dist (float): path length
-         """
-         dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))
-         return dist
-
-
- class SquareTransform(Transform):
-     """Non-point Transform to model a square FoV. Only works for single robot cases.
-     ToDo: update expand function to handle multi-robot case.
-
-     Args:
-         length (float): Length of the square FoV
-         num_side (int): Number of points along each side of the FoV
-     """
-     def __init__(self, length, num_side, **kwargs):
-         super().__init__(**kwargs)
-         self.length = length
-         self.num_side = num_side
-         self.length_factor=length/(self.num_side)
-         self.num_length = int(length/self.length_factor)
-
-         if self.aggregation_size == 0:
-             self.aggregation_size = None
-         elif self.aggregation_size is None:
-             self.aggregation_size = num_side**2
-
-     def expand(self, Xu):
-         """Applies the expansion transformation to the inducing points
-
-         Args:
-             Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.
-                           `m` is the number of inducing points,
-                           `3` is the dimension of the space (x, y, angle in radians)
-
-         Returns:
-             Xu (ndarray): (mp, 2); Inducing points in input space.
-                           `p` is the number of points each inducing point is mapped
-                           to in order to form the FoV.
-         """
-         x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)
-         x = tf.squeeze(x)
-         y = tf.squeeze(y)
-         theta = tf.squeeze(theta)
-
-         points = []
-         for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
-             points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2),
-                                        (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)],
-                                       [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2),
-                                        (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)],
-                                       self.num_side, axis=1))
-         xy = tf.concat(points, axis=1)
-         xy = tf.transpose(xy, [2, 1, 0])
-         xy = tf.reshape(xy, [-1, 2])
-         xy = self._reshape(xy, tf.shape(Xu)[1])
-         return xy
-
-     def _reshape(self, X, num_inducing):
-         """Reorder the inducing points to be in the correct order for aggregation with square FoV.
-
-         Args:
-             X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each
-                          inducing point is mapped to in order to form the FoV.
-
-         Returns:
-             Xu (ndarray): (mp, 2); Reorder inducing points
-         """
-         X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))
-         X = tf.transpose(X, (0, 2, 1, 3, 4))
-         X = tf.reshape(X, (-1, 2))
-         return X

  class IPPTransform(Transform):
      """Transform to model IPP problems
@@ -184,6 +98,7 @@ class IPPTransform(Transform):
      * For point sensing, set `sampling_rate = 2`
      * For continuous sensing, set `sampling_rate > 2` (account for the information along the path)
      * For continuous sensing with aggregation, set `sampling_rate > 2` and `aggregate_fov = True` (faster but solution quality is a bit diminished)
+     * If using a non-point FoV model with continuous sampling, only the FoV inducing points are aggregated
      * For multi-robot case, set `num_robots > 1`
      * For onlineIPP use `update_fixed` to freeze the visited waypoints

@@ -192,7 +107,7 @@ class IPPTransform(Transform):
          distance_budget (float): Distance budget for the path
          num_robots (int): Number of robots
          Xu_fixed (ndarray): (num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized
-         num_dim (int): Dimension of the data collection environment
+         num_dim (int): Number of dimensions of the inducing points
          sensor_model (Transform): Transform object to expand each inducing point to `p` points
                                    approximating each sensor's FoV
          aggregate_fov (bool): Used only when sampling_rate > 2, i.e., when using a continuous sensing model.
@@ -220,8 +135,11 @@ class IPPTransform(Transform):

          # Set aggregation size to sampling rate if aggregate_fov is True
          # and sampling rate is enabled (greater than 2)
-         if aggregate_fov and sampling_rate > 2:
-             self.aggregation_size = sampling_rate
+         if aggregate_fov:
+             if self.sensor_model is not None:
+                 self.sensor_model.enable_aggregation()
+             elif sampling_rate > 2:
+                 self.aggregation_size = sampling_rate

          # Initialize variable to store visited waypoints for onlineIPP
          if Xu_fixed is not None:
@@ -278,7 +196,10 @@ class IPPTransform(Transform):
              Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))

              if self.sensor_model is not None:
-                 Xu = self.sensor_model.expand(Xu)
+                 Xu_ = []
+                 for i in range(self.num_robots):
+                     Xu_.append(self.sensor_model.expand(Xu[i]))
+                 Xu = tf.concat(Xu_, axis=0)
                  return Xu

          Xu = tf.reshape(Xu, (-1, self.num_dim))
@@ -315,6 +236,7 @@ class IPPTransform(Transform):
          if self.distance_budget is None:
              return 0.
          else:
+             # Only do fixed points expansion transform
              Xu = self.expand(Xu, expand_sensor_model=False)
              dist = self.distance(Xu)-self.distance_budget
              dist = tf.reduce_sum(tf.nn.relu(dist))
@@ -323,39 +245,146 @@ class IPPTransform(Transform):

      def distance(self, Xu):
          """Computes the distance incurred by sequentially visiting the inducing points
-         ToDo: Change distance from 2d to nd. Currently limited to 2d
-         to ensure the rotation angle is not included when using
-         a square FoV sensor.
+         Args:
+             Xu (ndarray): (m, num_dim); Inducing points from which to compute the path lengths
+                           `m` is the number of inducing points
+                           `num_dim` dimension of the data collection environment
+         Returns:
+             dist (float or tensor of floats): path length(s)
+         """
+         Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+         if self.sensor_model is not None:
+             dists = []
+             for i in range(self.num_robots):
+                 dists.append(self.sensor_model.distance(Xu[i]))
+             dists = tf.concat(dists, axis=0)
+             return dists
+         else:
+             # Assumes 2D waypoints by default
+             dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
+             dist = tf.reduce_sum(dist, axis=1)
+             return dist
+
+
+ class SquareTransform(Transform):
+     """Non-point Transform to model a square FoV. Only works for single robot cases.
+
+     Args:
+         length (float): Length of the square FoV
+         num_side (int): Number of points along each side of the FoV
+         aggregate_fov (bool): If `True`, covariances corresponding to interpolated inducing points used to
+                               approximate the sensor FoV are aggregated to reduce the matrix inversion cost
+     """
+     def __init__(self, length, num_side, aggregate_fov=False, **kwargs):
+         super().__init__(**kwargs)
+         self.length = length
+         self.num_side = num_side
+         self.length_factor=length/(self.num_side)
+         self.num_length = int(length/self.length_factor)
+
+         if aggregate_fov:
+             self.enable_aggregation()
+
+     def enable_aggregation(self, size=None):
+         """Enable FoV covariance aggregation, which reduces the covariance matrix inversion cost by reducing the
+         covariance matrix size.
+
+         Args:
+             size (int): If None, all the interpolated inducing points within the FoV are aggregated. Alternatively,
+                         the number of inducing points to aggregate can be explicitly defined using this variable.
+         """
+         if size is None:
+             self.aggregation_size = self.num_side**2
+         else:
+             self.aggregation_size = size
+
+     def expand(self, Xu):
+         """Applies the expansion transformation to the inducing points
+
+         Args:
+             Xu (ndarray): (m, 3); Inducing points in the position and orientation space.
+                           `m` is the number of inducing points,
+                           `3` is the dimension of the space (x, y, angle in radians)
+
+         Returns:
+             Xu (ndarray): (mp, 2); Inducing points in input space.
+                           `p` is the number of points each inducing point is mapped
+                           to in order to form the FoV.
+         """
+         x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=1)
+         x = tf.reshape(x, [-1,])
+         y = tf.reshape(y, [-1,])
+         theta = tf.reshape(theta, [-1,])
+
+         points = []
+         for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
+             points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2),
+                                        (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)],
+                                       [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2),
+                                        (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)],
+                                       self.num_side, axis=1))
+         xy = tf.concat(points, axis=1)
+         xy = tf.transpose(xy, [2, 1, 0])
+         xy = self._reshape(xy)
+         return xy
+
+     def _reshape(self, X):
+         """Reorder the inducing points to be in the correct order for aggregation with square FoV.

          Args:
-             Xu (ndarray): Inducing points from which to compute the path lengths
+             X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each
+                          inducing point is mapped to in order to form the FoV.
+
+         Returns:
+             Xu (ndarray): (mp, 2); Reordered inducing points
+         """
+         X = tf.reshape(X, (-1, self.num_side, self.num_side, 2))
+         X = tf.transpose(X, (1, 0, 2, 3))
+         X = tf.reshape(X, (-1, 2))
+         return X
+
+     def distance(self, Xu):
+         """Computes the distance incurred by sequentially visiting the inducing points
+         Args:
+             Xu (ndarray): (m, 3); Inducing points from which to compute the path lengths.
+                           `m` is the number of inducing points.

          Returns:
              dist (float): path lengths
          """
-         Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
-         dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
-         dist = tf.reduce_sum(dist, axis=1)
+         Xu = tf.reshape(Xu, (-1, 3))[:, :2]
+         dist = tf.norm(Xu[1:] - Xu[:-1], axis=-1)
+         dist = tf.reduce_sum(dist, axis=0)
          return dist
-
+

  class SquareHeightTransform(Transform):
-     """Non-point Transform to model a height-dependent square FoV. Only works for single robot cases.
-     ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform
+     """Non-point Transform to model a height-dependent square FoV

      Args:
-         num_points (int): Number of points along each side of the FoV
-         distance_budget (float): Distance budget for the path
+         num_side (int): Number of points along each side of the FoV
+         aggregate_fov (bool): If `True`, covariances corresponding to interpolated inducing points used to
+                               approximate the sensor FoV are aggregated to reduce the matrix inversion cost
      """
-     def __init__(self, num_points, distance_budget=None, **kwargs):
+     def __init__(self, num_side, aggregate_fov=False, **kwargs):
          super().__init__(**kwargs)
-         self.num_points = num_points
-         self.distance_budget = distance_budget
-
-         if self.aggregation_size == 0:
-             self.aggregation_size = None
-         elif self.aggregation_size is None:
-             self.aggregation_size = num_points**2
+         self.num_side = num_side
+
+         if aggregate_fov:
+             self.enable_aggregation()
+
+     def enable_aggregation(self, size=None):
+         """Enable FoV covariance aggregation, which reduces the covariance matrix inversion cost by reducing the
+         covariance matrix size.
+
+         Args:
+             size (int): If None, all the interpolated inducing points within the FoV are aggregated. Alternatively,
+                         the number of inducing points to aggregate can be explicitly defined using this variable.
+         """
+         if size is None:
+             self.aggregation_size = self.num_side**2
+         else:
+             self.aggregation_size = size

      def expand(self, Xu):
          """
@@ -372,17 +401,17 @@ class SquareHeightTransform(Transform):
                            to in order to form the FoV.
          """
          x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)
-         x = tf.squeeze(x)
-         y = tf.squeeze(y)
-         h = tf.squeeze(h)
+         x = tf.reshape(x, [-1,])
+         y = tf.reshape(y, [-1,])
+         h = tf.reshape(h, [-1,])

-         delta = h / (self.num_points - 1)
+         delta = h / (self.num_side - 1)

          pts = []
-         for i in range(self.num_points):
+         for i in range(self.num_side):
              pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)],
                                     [x + h/2, y - (h/2) + (delta * i)],
-                                    self.num_points,
+                                    self.num_side,
                                     axis=1))
          xy = tf.concat(pts, axis=1)
          xy = tf.transpose(xy, [2, 1, 0])
@@ -400,7 +429,22 @@ class SquareHeightTransform(Transform):
          Returns:
              Xu (ndarray): (mp, 2); Reordered inducing points
          """
-         X = tf.reshape(X, (num_inducing, -1, self.num_points, self.num_points, 2))
+         X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))
          X = tf.transpose(X, (0, 2, 1, 3, 4))
          X = tf.reshape(X, (-1, 2))
          return X
+
+     def distance(self, Xu):
+         """Computes the distance incurred by sequentially visiting the inducing points
+         Args:
+             Xu (ndarray): (m, 3); Inducing points from which to compute the path lengths.
+                           `m` is the number of inducing points.
+
+         Returns:
+             dist (float): path lengths
+         """
+         Xu = tf.reshape(Xu, (-1, 3))
+         dist = tf.norm(Xu[1:] - Xu[:-1], axis=-1)
+         dist = tf.reduce_sum(dist, axis=0)
+         return dist
+
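
To illustrate how the relocated `SquareTransform` and the new `aggregate_fov`/`enable_aggregation` hooks fit together with `IPPTransform`, here is a hedged sketch; the waypoint values are illustrative, and keyword defaults not shown in this diff (such as `sampling_rate`) are assumed to exist as described in the docstring:

```python
import numpy as np
import tensorflow as tf
from sgptools.models.core.transformations import IPPTransform, SquareTransform

# Square FoV sensor approximated by a 5x5 grid of interpolated points,
# with covariance aggregation enabled via the new flag
sensor = SquareTransform(length=2.0, num_side=5, aggregate_fov=True)

# IPP transform over (x, y, heading) waypoints for a single robot
transform = IPPTransform(num_robots=1,
                         num_dim=3,
                         sensor_model=sensor)

waypoints = tf.constant(np.random.uniform(0, 10, (4, 3)), dtype=tf.float32)
fov_points = transform.expand(waypoints)    # (4 * 5**2, 2) FoV points
penalty = transform.constraints(waypoints)  # 0. unless a distance_budget is set
```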
sgptools/models/greedy_mi.py CHANGED
@@ -12,8 +12,8 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from .core.augmented_gpr import AugmentedGPR
  from apricot import CustomSelection
+ from gpflow.models.gpr import GPR
  import numpy as np


@@ -32,15 +32,24 @@ class GreedyMI:
          kernel (gpflow.kernels.Kernel): gpflow kernel function
          transform (Transform): Transform object
      """
-     def __init__(self, S, V, noise_variance, kernel, transform=None):
+     def __init__(self, S, V, noise_variance, kernel,
+                  transform=None):
          self.S = S
          self.V = V
          self.kernel = kernel
          self.input_dim = S.shape[1]
          self.noise_variance = noise_variance
          self.transform = transform
-
+
      def mutual_info(self, x):
+         """Computes mutual information using the points `x`
+
+         Args:
+             x (ndarray): (n); Indices of the solution placement locations
+
+         Returns:
+             MI (float): Mutual information between the placement x and candidate locations
+         """
          x = np.array(x).reshape(-1).astype(int)
          A = self.S[x[:-1]].reshape(-1, self.input_dim)
          y = self.S[x[-1]].reshape(-1, self.input_dim)
@@ -50,26 +59,27 @@ class GreedyMI:
          else:
              if self.transform is not None:
                  A = self.transform.expand(A)
-             a_gp = AugmentedGPR(data=(A, np.zeros((len(A), 1))),
-                                 kernel=self.kernel,
-                                 noise_variance=self.noise_variance,
-                                 transform=self.transform)
-             _, sigma_a = a_gp.predict_f(y, aggregate_train=True)
+             a_gp = GPR(data=(A, np.zeros((len(A), 1))),
+                        kernel=self.kernel,
+                        noise_variance=self.noise_variance)
+             _, sigma_a = a_gp.predict_f(y)

-         # Remove locations in A to build A bar
+         # Remove locations in A∪y from V to build A bar (Refer to Krause et al., 2008)
          V_ = self.V.copy()
          V_rows = V_.view([('', V_.dtype)] * V_.shape[1])
+
          if self.transform is not None:
-             A_ = self.transform.expand(self.S[x]).numpy()
+             solution = self.S[x].reshape(-1, self.input_dim)
+             A_ = self.transform.expand(solution)
          else:
              A_ = self.S[x]
          A_rows = A_.view([('', V_.dtype)] * A_.shape[1])
+
          V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])

-         self.v_gp = AugmentedGPR(data=(V_, np.zeros((len(V_), 1))),
-                                  kernel=self.kernel,
-                                  noise_variance=self.noise_variance,
-                                  transform=self.transform)
+         self.v_gp = GPR(data=(V_, np.zeros((len(V_), 1))),
+                         kernel=self.kernel,
+                         noise_variance=self.noise_variance)
          _, sigma_v = self.v_gp.predict_f(y)

          return (sigma_a/sigma_v).numpy().squeeze()
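
A short sketch of the updated `GreedyMI` objective, which now scores placements with plain gpflow `GPR` models instead of `AugmentedGPR`; the candidate set and kernel below are illustrative assumptions:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.models.greedy_mi import GreedyMI

candidates = np.random.uniform(0, 10, size=(100, 2))  # S: candidate placements
V = candidates.copy()                                 # V: monitoring locations

greedy_mi = GreedyMI(candidates, V, noise_variance=0.1, kernel=RBF())

# Indices of a partial solution; the last index is the location being scored
score = greedy_mi.mutual_info([3, 42, 7])
```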
sgptools/models/greedy_sgp.py CHANGED
@@ -30,20 +30,16 @@ class GreedySGP:
          V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
          noise_variance (float): Data noise variance
          kernel (gpflow.kernels.Kernel): gpflow kernel function
-         Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always
-                             added to the inducing points set during loss function computation
          transform (Transform): Transform object
      """
      def __init__(self, num_inducing, S, V, noise_variance, kernel,
-                  Xu_fixed=None,
                   transform=None):
-         self.gp = AugmentedSGPR((V, np.zeros((len(V), 1))),
-                                 noise_variance=noise_variance,
-                                 kernel=kernel,
-                                 inducing_variable=S[:num_inducing],
-                                 transform=transform)
+         self.sgp = AugmentedSGPR((V, np.zeros((len(V), 1))),
+                                  noise_variance=noise_variance,
+                                  kernel=kernel,
+                                  inducing_variable=S[:num_inducing],
+                                  transform=transform)
          self.locs = S
-         self.Xu_fixed = Xu_fixed
          self.num_inducing = num_inducing
          self.inducing_dim = S.shape[1]

@@ -51,21 +47,25 @@ class GreedySGP:
          """Computes the SGP's optimization bound using the inducing points `x`

          Args:
-             x (ndarray): (n, d); Inducing points
+             x (ndarray): (n); Indices of the solution placement locations

          Returns:
              elbo (float): Evidence lower bound/SGP's optimization bound value
          """
          x = np.array(x).reshape(-1).astype(int)
          Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)
+
+         # Initialize all inducing points at the first solution placement location.
+         # Ensures that the number of inducing points is always fixed and no additional
+         # information is passed to the SGP
          Xu *= self.locs[x][0]
-         Xu[-len(x):] = self.locs[x]

-         if self.Xu_fixed is not None:
-             Xu[:len(self.Xu_fixed)] = self.Xu_fixed
+         # Copy all given solution placements to the inducing points set
+         Xu[-len(x):] = self.locs[x]

-         self.gp.inducing_variable.Z.assign(Xu)
-         return self.gp.elbo().numpy()
+         # Update the SGP inducing points
+         self.sgp.inducing_variable.Z.assign(Xu)
+         return self.sgp.elbo().numpy() # return the ELBO


  def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel,
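
Similarly, a sketch of the simplified `GreedySGP` bound (the `Xu_fixed` argument has been dropped and the internal model is now stored as `sgp`); the candidate data, kernel, and float32 cast are illustrative assumptions:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.models.greedy_sgp import GreedySGP

candidates = np.random.uniform(0, 10, size=(100, 2)).astype(np.float32)

greedy_sgp = GreedySGP(num_inducing=5,
                       S=candidates,
                       V=candidates,
                       noise_variance=0.1,
                       kernel=RBF())

# ELBO of an SGP whose inducing points are fixed to the selected candidate indices
elbo = greedy_sgp.bound([3, 42, 7])
```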
sgptools/utils/data.py CHANGED
@@ -91,8 +91,8 @@ def prep_tif_dataset(dataset_path):
          dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.

      Returns:
-         X: (n, d); Dataset input features
-         y: (n, 1); Dataset labels
+         X (ndarray): (n, d); Dataset input features
+         y (ndarray): (n, 1); Dataset labels
      '''
      data = PIL.Image.open(dataset_path)
      data = np.array(data)
@@ -116,22 +116,31 @@ def prep_tif_dataset(dataset_path):

  ####################################################

- def prep_synthetic_dataset():
+ def prep_synthetic_dataset(shape=(50, 50),
+                            min_height=0.0,
+                            max_height=30.0,
+                            roughness=0.5,
+                            **kwargs):
      '''Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm.

      Refer to the following repo for more details:
      - [https://github.com/buckinha/DiamondSquare](https://github.com/buckinha/DiamondSquare)

      Args:
+         shape (tuple): (x, y); Grid size along the x and y axis
+         min_height (float): Minimum allowed height in the sampled data
+         max_height (float): Maximum allowed height in the sampled data
+         roughness (float): Roughness of the sampled data

      Returns:
-         X: (n, d); Dataset input features
-         y: (n, 1); Dataset labels
+         X (ndarray): (n, d); Dataset input features
+         y (ndarray): (n, 1); Dataset labels
      '''
-     data = diamond_square(shape=(50,50),
-                           min_height=0,
-                           max_height=30,
-                           roughness=0.5)
+     data = diamond_square(shape=shape,
+                           min_height=min_height,
+                           max_height=max_height,
+                           roughness=roughness,
+                           **kwargs)

      # create x and y coordinates from the extent
      x_coords = np.arange(0, data.shape[0])/10
@@ -145,17 +154,16 @@ def prep_synthetic_dataset():

  ####################################################

- def get_dataset(dataset_type, dataset_path=None,
+ def get_dataset(dataset_path=None,
                  num_train=1000,
                  num_test=2500,
-                 num_candidates=150):
+                 num_candidates=150,
+                 **kwargs):
      """Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to
      generate train and test sets.

      Args:
-         dataset_type (str): 'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file.
-                             'synthetic' will use the diamond square algorithm to generate synthetic elevation data.
-         dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.
+         dataset_path (str): Path to a tif dataset file. If None, the method will generate synthetic data.
          num_train (int): Number of training samples to generate.
          num_test (int): Number of testing samples to generate.
          num_candidates (int): Number of candidate locations to generate.
@@ -170,10 +178,10 @@ def get_dataset(dataset_type, dataset_path=None,
          y (ndarray): (n, 1); Full dataset labels
      """
      # Load the data
-     if dataset_type == 'tif':
+     if dataset_path is not None:
          X, y = prep_tif_dataset(dataset_path=dataset_path)
-     elif dataset_type == 'synthetic':
-         X, y = prep_synthetic_dataset()
+     else:
+         X, y = prep_synthetic_dataset(**kwargs)

      X_train = get_inducing_pts(X, num_train)
      X_train, y_train = cont2disc(X_train, X, y)
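
A sketch of the reworked `get_dataset` call signature: the old `dataset_type` argument is gone, synthetic data is generated whenever `dataset_path` is `None`, and extra keyword arguments are forwarded to `prep_synthetic_dataset`. The file path in the comment is a hypothetical placeholder:

```python
from sgptools.utils.data import get_dataset

# Synthetic elevation data (diamond-square); shape/roughness are forwarded kwargs
data = get_dataset(num_train=500, shape=(60, 60), roughness=0.6)

# GeoTIFF data: pass a path instead of the old dataset_type argument
# data = get_dataset(dataset_path='elevation.tif', num_train=500)

# Per the docstring, the returned tuple contains the train/test/candidate splits
# followed by the full dataset inputs and labels.
```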
sgptools/utils/gpflow.py CHANGED
@@ -18,6 +18,7 @@ from gpflow.utilities.traversal import print_summary
  import tensorflow as tf
  import tensorflow_probability as tfp

+ import numpy as np
  import matplotlib.pyplot as plt


@@ -87,6 +88,43 @@ def get_model_params(X_train, y_train,

      return loss, gpr_gt.likelihood.variance, kernel

+
+ class TraceInducingPts(gpflow.monitor.MonitorTask):
+     '''
+     GPflow monitoring task, used to trace the inducing points
+     states at every step during optimization.
+
+     Args:
+         model (gpflow.models.sgpr): GPflow GP/SGP model
+     '''
+     def __init__(self, model):
+         super().__init__()
+         self.trace = []
+         self.model = model
+
+     def run(self, **kwargs):
+         '''
+         Method used to extract the inducing points and
+         apply IPP fixed points transform if available
+         '''
+         Xu = self.model.inducing_variable.Z
+         Xu_exp = self.model.transform.expand(Xu,
+                                              expand_sensor_model=False).numpy()
+         self.trace.append(Xu_exp)
+
+     def get_trace(self):
+         '''
+         Returns the inducing points collected at each optimization step
+
+         Returns:
+             trace (ndarray): (n, m, d); Array with the inducing points.
+                              `n` is the number of optimization steps;
+                              `m` is the number of inducing points;
+                              `d` is the dimension of the inducing points.
+         '''
+         return np.array(self.trace)
+
+
  def optimize_model(model,
                     max_steps=2000,
                     kernel_grad=True,
@@ -102,21 +140,22 @@ def optimize_model(model,
      Trains a GP/SGP model

      Args:
-         model (gpflow.models): GPflow GP/SGP model to train
-         max_steps (int): Maximum number of training steps
-         kernel_grad (bool): If False, the kernel parameters will not be optimized
-         lr (float): Optimization learning rate
-         optimizer (str): Optimizer to use for training (`scipy` or `tf`)
+         model (gpflow.models): GPflow GP/SGP model to train.
+         max_steps (int): Maximum number of training steps.
+         kernel_grad (bool): If `False`, the kernel parameters will not be optimized.
+                             Ignored when `trainable_variables` are passed.
+         lr (float): Optimization learning rate.
+         optimizer (str): Optimizer to use for training (`scipy` or `tf`).
          method (str): Optimization method refer to [scipy minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize)
                        and [tf optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers) for full list
-         verbose (bool): If true, the training progress will be printed
+         verbose (bool): If `True`, the training progress will be printed when using Scipy.
          trace_fn (str): Function to trace metrics during training.
-                         If `None`, the loss values are traced;
-                         if `traceXu`, it the inducing points states at each optimization step are traced
-         convergence_criterion (bool): It True, enables early stopping when the loss plateaus
-         trainable_variables (list): List of model variables to train
-                                     (can be used to limit training to a subset of variables)
-         tol (float): Convergence tolerance to decide when to stop optimization
+                         If `None`, the loss values are returned;
+                         If `traceXu`, the inducing point states at each optimization step are returned (increases computation time).
+         convergence_criterion (bool): If `True` and using a tensorflow optimizer, it
+                                       enables early stopping when the loss plateaus.
+         trainable_variables (list): List of model variables to train.
+         tol (float): Convergence tolerance to decide when to stop optimization.
      """
      # Train all variables if trainable_variables are not provided
      # If kernel_gradient is False, disable the kernel parameter gradient updates
@@ -128,13 +167,24 @@ def optimize_model(model,
      if optimizer == 'scipy':
          if method is None:
              method = 'L-BFGS-B'
+
+         if trace_fn == 'traceXu':
+             execute_task = TraceInducingPts(model)
+             task_group = gpflow.monitor.MonitorTaskGroup(execute_task,
+                                                          period=1)
+             trace_fn = gpflow.monitor.Monitor(task_group)
+
          opt = gpflow.optimizers.Scipy()
          losses = opt.minimize(model.training_loss,
                                trainable_variables,
                                method=method,
                                options=dict(disp=verbose, maxiter=max_steps),
-                               tol=tol)
-         losses = losses.fun
+                               tol=tol,
+                               step_callback=trace_fn)
+         if trace_fn is None:
+             losses = losses.fun
+         else:
+             losses = trace_fn.task_groups[0].tasks[0].get_trace()
      else:
          if trace_fn is None:
              trace_fn = lambda x: x.loss
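
A hedged sketch of the new inducing-point tracing path through `optimize_model`: passing `trace_fn='traceXu'` with the Scipy optimizer attaches the `TraceInducingPts` monitor and, as the final `losses` assignment suggests, returns the traced inducing points instead of loss values. The `AugmentedSGPR`/`IPPTransform` setup, data, and kernel are illustrative assumptions:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.models.core.augmented_sgpr import AugmentedSGPR
from sgptools.models.core.transformations import IPPTransform
from sgptools.utils.gpflow import optimize_model

X = np.random.uniform(0, 10, size=(500, 2))  # assumed monitoring locations

model = AugmentedSGPR((X, np.zeros((len(X), 1))),
                      noise_variance=0.1,
                      kernel=RBF(),
                      inducing_variable=X[:10],
                      transform=IPPTransform(num_robots=1))

# Expected trace shape (num_steps, 10, 2): inducing points after each step
trace = optimize_model(model,
                       max_steps=200,
                       optimizer='scipy',
                       trace_fn='traceXu')
```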
sgptools/utils/metrics.py CHANGED
@@ -131,12 +131,12 @@ def get_rmse(y_pred, y_test):
      """
      return np.sqrt(np.mean(np.square(y_pred - y_test)))

- def get_reconstruction(Xu, X_test, noise_variance, kernel):
+ def get_reconstruction(sensor_data, X_test, noise_variance, kernel):
      """Computes the GP-based data field estimates with the solution placements as the training set

      Args:
-         Xu (tuple): (ndarray (m, d); ndarray (m, 1)); Sensing locations' input
-                     and corresponding ground truth labels
+         sensor_data (ndarray tuple): ((m, d), (m, 1)); Sensing locations' input
+                                      and corresponding ground truth labels
          X_test (ndarray): (n, d); Testing data input locations
          noise_variance (float): data variance
          kernel (gpflow.kernels.Kernel): gpflow kernel function
@@ -145,7 +145,7 @@ def get_reconstruction(Xu, X_test, noise_variance, kernel):
          y_pred (ndarray): (n, 1); Predicted data field estimates
          y_var (ndarray): (n, 1); Prediction variance at each location in the data field
      """
-     Xu_X, Xu_y = Xu
+     Xu_X, Xu_y = sensor_data

      # Get the GP predictions
      gpr = gpflow.models.GPR((Xu_X, Xu_y),
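
Usage of the renamed `sensor_data` argument, with illustrative data: the tuple packs the solution sensing locations and their ground-truth labels, and the reconstruction can then be scored with `get_rmse`:

```python
import numpy as np
from gpflow.kernels import RBF
from sgptools.utils.metrics import get_reconstruction, get_rmse

X_test = np.random.uniform(0, 10, size=(200, 2))  # assumed test locations
y_test = np.sin(X_test[:, :1])                    # assumed ground truth

sensor_data = (X_test[:10], y_test[:10])          # solution placements + labels
y_pred, y_var = get_reconstruction(sensor_data, X_test,
                                   noise_variance=0.1, kernel=RBF())
print(get_rmse(y_pred, y_test))
```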
sgptools/utils/misc.py CHANGED
@@ -51,7 +51,10 @@ def cont2disc(Xu, candidates, candidate_labels=None):
      """
      # Sanity check to ensure that there are sensing locations and candidates to match
      if len(candidates)==0 or len(Xu)==0:
-         return []
+         if candidate_labels is not None:
+             return [], []
+         else:
+             return []

      dists = pairwise_distances(candidates, Y=Xu, metric='euclidean')
      row_ind, _ = linear_sum_assignment(dists)
@@ -83,6 +86,7 @@ def plot_paths(paths, candidates=None, title=None):
      plt.legend(bbox_to_anchor=(1.0, 1.02))
      if title is not None:
          plt.title(title)
+     plt.gca().set_aspect('equal')
      plt.xlabel('X')
      plt.ylabel('Y')

@@ -133,4 +137,4 @@ def project_waypoints(waypoints, candidates):
      """
      waypoints_disc = cont2disc(waypoints, candidates)
      waypoints_valid = _reoder_path(waypoints, waypoints_disc)
-     return waypoints_valid
+     return waypoints_valid
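
A small sketch of the updated `cont2disc` empty-input contract: when candidate labels are supplied, the early return now yields a pair of empty lists so callers can still unpack two values (the arrays below are illustrative):

```python
import numpy as np
from sgptools.utils.misc import cont2disc

waypoints = np.random.uniform(0, 10, size=(5, 2))
no_candidates = np.empty((0, 2))

print(cont2disc(waypoints, no_candidates))                    # []
print(cont2disc(waypoints, no_candidates, np.empty((0, 1))))  # ([], [])
```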
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sgptools
- Version: 1.0.5
+ Version: 1.1.0
  Summary: Software Suite for Sensor Placement and Informative Path Planning
  Home-page: https://www.itskalvik.com/sgp-tools
  Author: Kalvik
@@ -0,0 +1,25 @@
+ sgptools/__init__.py,sha256=J4jZOdSzuUtaabg4TtYxxWfQwZkA0E_bVK3aaBPgPAc,449
+ sgptools/kernels/__init__.py,sha256=zRf4y-wJwjXKt1uOnmI5MbzCA6pRlyA7C-eagLfb3d0,190
+ sgptools/kernels/neural_kernel.py,sha256=9XEjcwwi1Gwj4D5cAZwq5QdWqMaI-Vu2DKgYO58DmPg,6709
+ sgptools/models/__init__.py,sha256=X2lIg9kf1-2MHUswk-VW2dHHcbSLxf6_IuV7lc_kvDc,682
+ sgptools/models/bo.py,sha256=sjs18oRXL-yoNiLoaaoROjaJXqfj_CwouJPe9HgzjL0,4857
+ sgptools/models/cma_es.py,sha256=LjWRcUIcARcFvAHR2F8prPDmgxLzYI0kRwYXzKp3APc,4861
+ sgptools/models/continuous_sgp.py,sha256=USf9fG1Pl-rQfcD_ffP6mB4mrh0F9RMUA7Lfhw9rc48,2940
+ sgptools/models/greedy_mi.py,sha256=06CY6tm9C3iBYEG_DOuQKDmWIww9Ah0rkeJUXsCR2YU,5018
+ sgptools/models/greedy_sgp.py,sha256=giddMbU3ohePTdLTcH4fDx-bS9upq1T_K8KUW_Ag6HI,4490
+ sgptools/models/core/__init__.py,sha256=TlUdvrM0A7vSzc5IM8C2Y2kliB1ip7YLEcHHzvuw-C4,482
+ sgptools/models/core/augmented_gpr.py,sha256=NuYwlggz7ho7pvW4-so3ghos5vZ8oK7nRZqvHpAt0Zk,3497
+ sgptools/models/core/augmented_sgpr.py,sha256=qMP9J4AnOUx9AEZfaPhoyb3RP_2AOhOUCUY4eh7uOi0,7185
+ sgptools/models/core/osgpr.py,sha256=gqliUdXdnt3fea206LP0rqGIggmIdKh8WP2DtFWzdBw,11798
+ sgptools/models/core/transformations.py,sha256=avluSP7g4EWjijlpFQYobO_CHKA0-V7PemC6SdVwrG8,18976
+ sgptools/utils/__init__.py,sha256=jgWqzSDgUbqOTFo8mkqZaTlyz44l3v2XYPJfcHYHjqM,376
+ sgptools/utils/data.py,sha256=oTXq4oRuzJdXpZC6frUfja8jhwy_ZdDDi7L1BYZcdQs,7309
+ sgptools/utils/gpflow.py,sha256=LnFYufnMW4ch7qsKnru53QUxEtIzJqE822qj6w8ssRg,8576
+ sgptools/utils/metrics.py,sha256=tu8H129n8GuxV5fQIKLcfzPUxd7sp8zEF9qZBOZjNKo,5834
+ sgptools/utils/misc.py,sha256=LdAFJS7-xubWpRnrgdLOorCa9vB_8vRrvL5cahxHYNA,5442
+ sgptools/utils/tsp.py,sha256=RJAQ4_uE7CUtR1ei3nSnGy-1kNhw82E9P_HyaCkc4iI,7007
+ sgptools-1.1.0.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ sgptools-1.1.0.dist-info/METADATA,sha256=4_pfsYY9aY2D8vw_gsb0EGoE2ErABQog-slYq1ThEQo,944
+ sgptools-1.1.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ sgptools-1.1.0.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
+ sgptools-1.1.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (73.0.1)
+ Generator: setuptools (75.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,25 +0,0 @@
- sgptools/__init__.py,sha256=J4jZOdSzuUtaabg4TtYxxWfQwZkA0E_bVK3aaBPgPAc,449
- sgptools/kernels/__init__.py,sha256=zRf4y-wJwjXKt1uOnmI5MbzCA6pRlyA7C-eagLfb3d0,190
- sgptools/kernels/neural_kernel.py,sha256=9XEjcwwi1Gwj4D5cAZwq5QdWqMaI-Vu2DKgYO58DmPg,6709
- sgptools/models/__init__.py,sha256=X2lIg9kf1-2MHUswk-VW2dHHcbSLxf6_IuV7lc_kvDc,682
- sgptools/models/bo.py,sha256=RkcbD0t2d1gkZf7SMtMyymlYne9sFEaA4pjmgQhlTWY,3640
- sgptools/models/cma_es.py,sha256=URLndW_lb9zQj6XrxO6tnm9U6xld8jgsrqsg-Oo5vyw,8006
- sgptools/models/continuous_sgp.py,sha256=USf9fG1Pl-rQfcD_ffP6mB4mrh0F9RMUA7Lfhw9rc48,2940
- sgptools/models/greedy_mi.py,sha256=LJVXKHC58iJs1CyfAioAjCeK2o5hiViTchFxuiKj73o,4870
- sgptools/models/greedy_sgp.py,sha256=jgQexdEZ_z88iD6sM_D0NhJvBodQ9zindh9L7117q_8,4462
- sgptools/models/core/__init__.py,sha256=TlUdvrM0A7vSzc5IM8C2Y2kliB1ip7YLEcHHzvuw-C4,482
- sgptools/models/core/augmented_gpr.py,sha256=NuYwlggz7ho7pvW4-so3ghos5vZ8oK7nRZqvHpAt0Zk,3497
- sgptools/models/core/augmented_sgpr.py,sha256=qMP9J4AnOUx9AEZfaPhoyb3RP_2AOhOUCUY4eh7uOi0,7185
- sgptools/models/core/osgpr.py,sha256=gqliUdXdnt3fea206LP0rqGIggmIdKh8WP2DtFWzdBw,11798
- sgptools/models/core/transformations.py,sha256=NTj3yZjTQXTHBr24XFI_W5dh2-TVginisEiT6LPeMeg,16742
- sgptools/utils/__init__.py,sha256=jgWqzSDgUbqOTFo8mkqZaTlyz44l3v2XYPJfcHYHjqM,376
- sgptools/utils/data.py,sha256=5HX4YwxZX4Q1J6UekB9GARMVhjsF806kTh2eRCrU79I,6978
- sgptools/utils/gpflow.py,sha256=bnrtKl9tvKVh1Wz969KQJQf8UtApa7i0SkOj-KMOWNc,6816
- sgptools/utils/metrics.py,sha256=0tyMPoZgA-dRNs-Ye69paMNFpWgLuLNE1t8ywOnRmaE,5806
- sgptools/utils/misc.py,sha256=W8lx6n1eJeeiq6cyRGzhZMCnWItoiQzODHQkqVAQi4U,5322
- sgptools/utils/tsp.py,sha256=RJAQ4_uE7CUtR1ei3nSnGy-1kNhw82E9P_HyaCkc4iI,7007
- sgptools-1.0.5.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- sgptools-1.0.5.dist-info/METADATA,sha256=GqxkIaxPKoQjZhyU2505xFvNs5xqBu40Ncah6BWFDm4,944
- sgptools-1.0.5.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
- sgptools-1.0.5.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
- sgptools-1.0.5.dist-info/RECORD,,