sgptools-1.0.5-py3-none-any.whl → sgptools-1.0.6-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
@@ -15,17 +15,9 @@
 """Provides transforms to model complex sensor field of views and handle informative path planning
 """
 
-try:
-    from tensorflow_graphics.math.interpolation import bspline
-except:
-    pass
-
 import tensorflow as tf
 import numpy as np
 
-from scipy.optimize import linear_sum_assignment
-from sklearn.metrics import pairwise_distances
-
 
 class Transform:
     """Base class for transformations of the inducing points, including expansion and aggregation transforms.
@@ -98,84 +90,6 @@ class Transform:
         """
         return 0.
 
-    def distance(self, Xu):
-        """Computes the distance incured by sequentially visiting the inducing points
-
-        Args:
-            Xu (ndarray): Inducing points from which to compute the path length
-
-        Returns:
-            dist (float): path length
-        """
-        dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))
-        return dist
-
-
-class SquareTransform(Transform):
-    """Non-point Transform to model a square FoV. Only works for single robot cases.
-    ToDo: update expand function to handle multi-robot case.
-
-    Args:
-        length (float): Length of the square FoV
-        num_side (int): Number of points along each side of the FoV
-    """
-    def __init__(self, length, num_side, **kwargs):
-        super().__init__(**kwargs)
-        self.length = length
-        self.num_side = num_side
-        self.length_factor=length/(self.num_side)
-        self.num_length = int(length/self.length_factor)
-
-        if self.aggregation_size == 0:
-            self.aggregation_size = None
-        elif self.aggregation_size is None:
-            self.aggregation_size = num_side**2
-
-    def expand(self, Xu):
-        """Applies the expansion transformation to the inducing points
-
-        Args:
-            Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.
-                          `m` is the number of inducing points,
-                          `3` is the dimension of the space (x, y, angle in radians)
-
-        Returns:
-            Xu (ndarray): (mp, 2); Inducing points in input space.
-                          `p` is the number of points each inducing point is mapped
-                          to in order to form the FoV.
-        """
-        x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)
-        x = tf.squeeze(x)
-        y = tf.squeeze(y)
-        theta = tf.squeeze(theta)
-
-        points = []
-        for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
-            points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2),
-                                       (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)],
-                                      [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2),
-                                       (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)],
-                                      self.num_side, axis=1))
-        xy = tf.concat(points, axis=1)
-        xy = tf.transpose(xy, [2, 1, 0])
-        xy = tf.reshape(xy, [-1, 2])
-        xy = self._reshape(xy, tf.shape(Xu)[1])
-        return xy
-
-    def _reshape(self, X, num_inducing):
-        """Reorder the inducing points to be in the correct order for aggregation with square FoV.
-
-        Args:
-            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each
-                         inducing point is mapped to in order to form the FoV.
-
-        Returns:
-            Xu (ndarray): (mp, 2); Reorder inducing points
-        """
-        X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))
-        X = tf.transpose(X, (0, 2, 1, 3, 4))
-        X = tf.reshape(X, (-1, 2))
-        return X
 
 class IPPTransform(Transform):
     """Transform to model IPP problems
@@ -184,6 +98,7 @@ class IPPTransform(Transform):
     * For point sensing, set `sampling_rate = 2`
     * For continuous sensing, set `sampling_rate > 2` (account for the information along the path)
     * For continuous sensing with aggregation, set `sampling_rate > 2` and `aggregate_fov = True` (faster but solution quality is a bit diminished)
+    * If using a non-point FoV model with continuous sampling, only the FoV inducing points are aggregated
     * For multi-robot case, set `num_robots > 1`
    * For onlineIPP use `update_fixed` to freeze the visited waypoints
 
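The sensing modes listed in the docstring above map to constructor arguments. A minimal sketch (class and argument names are taken from this diff; defaults and the full argument list are assumptions):

```python
# Sketch only: constructor defaults are assumptions, not the released API.
from sgptools.models.core.transformations import IPPTransform

# Point sensing: only the waypoints themselves are used
point_sensing = IPPTransform(num_robots=1, sampling_rate=2)

# Continuous sensing with aggregation of the interpolated points along the path
continuous_sensing = IPPTransform(num_robots=1,
                                  sampling_rate=10,
                                  aggregate_fov=True)
```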
@@ -192,7 +107,7 @@
         distance_budget (float): Distance budget for the path
         num_robots (int): Number of robots
         Xu_fixed (ndarray): (num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized
-        num_dim (int): Dimension of the data collection environment
+        num_dim (int): Number of dimensions of the inducing points
         sensor_model (Transform): Transform object to expand each inducing point to `p` points
                                   approximating each sensor's FoV
         aggregate_fov (bool): Used only when sampling_rate > 2, i.e., when using a continuous sensing model.
@@ -220,8 +135,11 @@
 
         # Set aggregation size to sampling rate if aggregate_fov is True
         # and sampling rate is enabled (greater than 2)
-        if aggregate_fov and sampling_rate > 2:
-            self.aggregation_size = sampling_rate
+        if aggregate_fov:
+            if self.sensor_model is not None:
+                self.sensor_model.enable_aggregation()
+            elif sampling_rate > 2:
+                self.aggregation_size = sampling_rate
 
         # Initilize variable to store visited waypoints for onlineIPP
         if Xu_fixed is not None:
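With this change, `aggregate_fov=True` is forwarded to the sensor model when one is attached, and only falls back to the sampling-rate path otherwise. A hedged sketch of the two paths (keyword usage and values are illustrative assumptions):

```python
# Sketch only: exact keyword defaults may differ from the released code.
from sgptools.models.core.transformations import IPPTransform, SquareTransform

# Non-point FoV: aggregation is enabled on the sensor model itself
fov = SquareTransform(length=2.0, num_side=5)
ipp_fov = IPPTransform(num_robots=1,
                       num_dim=3,            # (x, y, angle) waypoints
                       sensor_model=fov,
                       aggregate_fov=True)   # delegates to fov.enable_aggregation()

# No sensor model: aggregation falls back to the sampling-rate path
ipp_cont = IPPTransform(num_robots=1,
                        sampling_rate=10,
                        aggregate_fov=True)  # sets aggregation_size = sampling_rate
```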
@@ -278,7 +196,10 @@
         Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
 
         if self.sensor_model is not None:
-            Xu = self.sensor_model.expand(Xu)
+            Xu_ = []
+            for i in range(self.num_robots):
+                Xu_.append(self.sensor_model.expand(Xu[i]))
+            Xu = tf.concat(Xu_, axis=0)
             return Xu
 
         Xu = tf.reshape(Xu, (-1, self.num_dim))
@@ -315,6 +236,7 @@
         if self.distance_budget is None:
             return 0.
         else:
+            # Only do fixed points expansion transform
             Xu = self.expand(Xu, expand_sensor_model=False)
             dist = self.distance(Xu)-self.distance_budget
             dist = tf.reduce_sum(tf.nn.relu(dist))
@@ -323,39 +245,146 @@
 
     def distance(self, Xu):
         """Computes the distance incured by sequentially visiting the inducing points
-        ToDo: Change distance from 2d to nd. Currently limited to 2d
-              to ensure the rotation angle is not included when using
-              a square FoV sensor.
+        Args:
+            Xu (ndarray): (m, num_dim); Inducing points from which to compute the path lengths
+                          `m` is the number of inducing points
+                          `num_dim` dimension of the data collection environment
+        Returns:
+            dist (float or tensor of floats): path length(s)
+        """
+        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+        if self.sensor_model is not None:
+            dists = []
+            for i in range(self.num_robots):
+                dists.append(self.sensor_model.distance(Xu[i]))
+            dists = tf.concat(dists, axis=0)
+            return dists
+        else:
+            # Assumes 2D waypoints by default
+            dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
+            dist = tf.reduce_sum(dist, axis=1)
+            return dist
+
+
+class SquareTransform(Transform):
+    """Non-point Transform to model a square FoV. Only works for single robot cases.
+
+    Args:
+        length (float): Length of the square FoV
+        num_side (int): Number of points along each side of the FoV
+        aggregate_fov (bool): If `True`, covariances corresponding to interpolated inducing points used to
+                              approximate the sensor FoV are aggregated to reduce the matrix inversion cost
+    """
+    def __init__(self, length, num_side, aggregate_fov=False, **kwargs):
+        super().__init__(**kwargs)
+        self.length = length
+        self.num_side = num_side
+        self.length_factor=length/(self.num_side)
+        self.num_length = int(length/self.length_factor)
+
+        if aggregate_fov:
+            self.enable_aggregation()
+
+    def enable_aggregation(self, size=None):
+        """Enable FoV covariance aggregation, which reduces the covariance matrix inversion cost by reducing the
+        covariance matrix size.
+
+        Args:
+            size (int): If None, all the interpolated inducing points within the FoV are aggregated. Alternatively,
+                        the number of inducing points to aggregate can be explicitly defined using this variable.
+        """
+        if size is None:
+            self.aggregation_size = self.num_side**2
+        else:
+            self.aggregation_size = size
+
+    def expand(self, Xu):
+        """Applies the expansion transformation to the inducing points
+
+        Args:
+            Xu (ndarray): (m, 3); Inducing points in the position and orientation space.
+                          `m` is the number of inducing points,
+                          `3` is the dimension of the space (x, y, angle in radians)
+
+        Returns:
+            Xu (ndarray): (mp, 2); Inducing points in input space.
+                          `p` is the number of points each inducing point is mapped
+                          to in order to form the FoV.
+        """
+        x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=1)
+        x = tf.reshape(x, [-1,])
+        y = tf.reshape(y, [-1,])
+        theta = tf.reshape(theta, [-1,])
+
+        points = []
+        for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
+            points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2),
+                                       (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)],
+                                      [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2),
+                                       (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)],
+                                      self.num_side, axis=1))
+        xy = tf.concat(points, axis=1)
+        xy = tf.transpose(xy, [2, 1, 0])
+        xy = self._reshape(xy)
+        return xy
+
+    def _reshape(self, X):
+        """Reorder the inducing points to be in the correct order for aggregation with square FoV.
 
         Args:
-            Xu (ndarray): Inducing points from which to compute the path lengths
+            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each
+                         inducing point is mapped to in order to form the FoV.
+
+        Returns:
+            Xu (ndarray): (mp, 2); Reorder inducing points
+        """
+        X = tf.reshape(X, (-1, self.num_side, self.num_side, 2))
+        X = tf.transpose(X, (1, 0, 2, 3))
+        X = tf.reshape(X, (-1, 2))
+        return X
+
+    def distance(self, Xu):
+        """Computes the distance incured by sequentially visiting the inducing points
+        Args:
+            Xu (ndarray): (m, 3); Inducing points from which to compute the path lengths.
+                          `m` is the number of inducing points.
 
         Returns:
             dist (float): path lengths
         """
-        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
-        dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
-        dist = tf.reduce_sum(dist, axis=1)
+        Xu = tf.reshape(Xu, (-1, 3))[:, :2]
+        dist = tf.norm(Xu[1:] - Xu[:-1], axis=-1)
+        dist = tf.reduce_sum(dist, axis=0)
         return dist
-
+
 
 class SquareHeightTransform(Transform):
-    """Non-point Transform to model a height-dependent square FoV. Only works for single robot cases.
-    ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform
+    """Non-point Transform to model a height-dependent square FoV
 
     Args:
-        num_points (int): Number of points along each side of the FoV
-        distance_budget (float): Distance budget for the path
+        num_side (int): Number of points along each side of the FoV
+        aggregate_fov (bool): If `True`, covariances corresponding to interpolated inducing points used to
+                              approximate the sensor FoV are aggregated to reduce the matrix inversion cost
     """
-    def __init__(self, num_points, distance_budget=None, **kwargs):
+    def __init__(self, num_side, aggregate_fov=False, **kwargs):
         super().__init__(**kwargs)
-        self.num_points = num_points
-        self.distance_budget = distance_budget
-
-        if self.aggregation_size == 0:
-            self.aggregation_size = None
-        elif self.aggregation_size is None:
-            self.aggregation_size = num_points**2
+        self.num_side = num_side
+
+        if aggregate_fov:
+            self.enable_aggregation()
+
+    def enable_aggregation(self, size=None):
+        """Enable FoV covariance aggregation, which reduces the covariance matrix inversion cost by reducing the
+        covariance matrix size.
+
+        Args:
+            size (int): If None, all the interpolated inducing points within the FoV are aggregated. Alternatively,
+                        the number of inducing points to aggregate can be explicitly defined using this variable.
+        """
+        if size is None:
+            self.aggregation_size = self.num_side**2
+        else:
+            self.aggregation_size = size
 
     def expand(self, Xu):
         """
@@ -372,17 +401,17 @@ class SquareHeightTransform(Transform):
                           to in order to form the FoV.
         """
         x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)
-        x = tf.squeeze(x)
-        y = tf.squeeze(y)
-        h = tf.squeeze(h)
+        x = tf.reshape(x, [-1,])
+        y = tf.reshape(y, [-1,])
+        h = tf.reshape(h, [-1,])
 
-        delta = h / (self.num_points - 1)
+        delta = h / (self.num_side - 1)
 
         pts = []
-        for i in range(self.num_points):
+        for i in range(self.num_side):
             pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)],
                                    [x + h/2, y - (h/2) + (delta * i)],
-                                   self.num_points,
+                                   self.num_side,
                                    axis=1))
         xy = tf.concat(pts, axis=1)
         xy = tf.transpose(xy, [2, 1, 0])
@@ -400,7 +429,22 @@
         Returns:
             Xu (ndarray): (mp, 2); Reorder inducing points
         """
-        X = tf.reshape(X, (num_inducing, -1, self.num_points, self.num_points, 2))
+        X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))
         X = tf.transpose(X, (0, 2, 1, 3, 4))
         X = tf.reshape(X, (-1, 2))
         return X
+
+    def distance(self, Xu):
+        """Computes the distance incured by sequentially visiting the inducing points
+        Args:
+            Xu (ndarray): (m, 3); Inducing points from which to compute the path lengths.
+                          `m` is the number of inducing points.
+
+        Returns:
+            dist (float): path lengths
+        """
+        Xu = tf.reshape(Xu, (-1, 3))
+        dist = tf.norm(Xu[1:] - Xu[:-1], axis=-1)
+        dist = tf.reduce_sum(dist, axis=0)
+        return dist
+
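To make the new shape conventions concrete, here is a hedged sketch of expanding (x, y, angle) waypoints with the reworked single-robot `SquareTransform`, plus the renamed `SquareHeightTransform` constructor (shapes follow the docstrings in the hunks above; all values are illustrative):

```python
# Sketch only: illustrative values; constructor defaults may differ from the released code.
import numpy as np
from sgptools.models.core.transformations import SquareTransform, SquareHeightTransform

fov = SquareTransform(length=2.0, num_side=3)
waypoints = np.array([[0.0, 0.0, 0.0],
                      [1.0, 1.0, np.pi / 4]])   # (m, 3): x, y, angle in radians
fov_points = fov.expand(waypoints)              # -> (m * num_side**2, 2) FoV points
path_length = fov.distance(waypoints)           # path length over (x, y) only

# The height-dependent variant now takes num_side/aggregate_fov
# instead of num_points/distance_budget
fov_h = SquareHeightTransform(num_side=3, aggregate_fov=True)
```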
sgptools/utils/data.py CHANGED
@@ -91,8 +91,8 @@ def prep_tif_dataset(dataset_path):
        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.
 
    Returns:
-        X: (n, d); Dataset input features
-        y: (n, 1); Dataset labels
+        X (ndarray): (n, d); Dataset input features
+        y (ndarray): (n, 1); Dataset labels
    '''
    data = PIL.Image.open(dataset_path)
    data = np.array(data)
@@ -116,22 +116,31 @@
 
 ####################################################
 
-def prep_synthetic_dataset():
+def prep_synthetic_dataset(shape=(50, 50),
+                           min_height=0.0,
+                           max_height=30.0,
+                           roughness=0.5,
+                           **kwargs):
    '''Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm.
 
    Refer to the following repo for more details:
    - [https://github.com/buckinha/DiamondSquare](https://github.com/buckinha/DiamondSquare)
 
    Args:
+        shape (tuple): (x, y); Grid size along the x and y axis
+        min_height (float): Minimum allowed height in the sampled data
+        max_height (float): Maximum allowed height in the sampled data
+        roughness (float): Roughness of the sampled data
 
    Returns:
-        X: (n, d); Dataset input features
-        y: (n, 1); Dataset labels
+        X (ndarray): (n, d); Dataset input features
+        y (ndarray): (n, 1); Dataset labels
    '''
-    data = diamond_square(shape=(50,50),
-                          min_height=0,
-                          max_height=30,
-                          roughness=0.5)
+    data = diamond_square(shape=shape,
+                          min_height=min_height,
+                          max_height=max_height,
+                          roughness=roughness,
+                          **kwargs)
 
    # create x and y coordinates from the extent
    x_coords = np.arange(0, data.shape[0])/10
@@ -145,17 +154,16 @@
 
 ####################################################
 
-def get_dataset(dataset_type, dataset_path=None,
+def get_dataset(dataset_path=None,
                num_train=1000,
                num_test=2500,
-                num_candidates=150):
+                num_candidates=150,
+                **kwargs):
    """Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to
    generate train and test sets.
 
    Args:
-        dataset_type (str): 'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file.
-                            'synthetic' will use the diamond square algorithm to generate synthetic elevation data.
-        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.
+        dataset_path (str): Path to a tif dataset file. If None, the method will generate synthetic data.
        num_train (int): Number of training samples to generate.
        num_test (int): Number of testing samples to generate.
        num_candidates (int): Number of candidate locations to generate.
@@ -170,10 +178,10 @@ def get_dataset(dataset_type, dataset_path=None,
        y (ndarray): (n, 1); Full dataset labels
    """
    # Load the data
-    if dataset_type == 'tif':
+    if dataset_path is not None:
        X, y = prep_tif_dataset(dataset_path=dataset_path)
-    elif dataset_type == 'synthetic':
-        X, y = prep_synthetic_dataset()
+    else:
+        X, y = prep_synthetic_dataset(**kwargs)
 
    X_train = get_inducing_pts(X, num_train)
    X_train, y_train = cont2disc(X_train, X, y)
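A hedged sketch of the updated `get_dataset` call signature; the return value is kept opaque here because its exact tuple structure is not shown in this diff, and the file path is a placeholder:

```python
# Sketch only: keyword names follow the diff above; synthetic-data kwargs are
# forwarded to prep_synthetic_dataset (shape, min_height, max_height, roughness).
from sgptools.utils.data import get_dataset

# Synthetic elevation data is generated when dataset_path is None
synthetic_splits = get_dataset(num_train=500, shape=(100, 100), roughness=0.3)

# Loading a GeoTIFF now only needs the path; the dataset_type flag is gone
# tif_splits = get_dataset(dataset_path='path/to/elevation.tif')
```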
sgptools/utils/gpflow.py CHANGED
@@ -18,6 +18,7 @@ from gpflow.utilities.traversal import print_summary
 import tensorflow as tf
 import tensorflow_probability as tfp
 
+import numpy as np
 import matplotlib.pyplot as plt
 
 
@@ -87,6 +88,43 @@ def get_model_params(X_train, y_train,
 
    return loss, gpr_gt.likelihood.variance, kernel
 
+
+class TraceInducingPts(gpflow.monitor.MonitorTask):
+    '''
+    GPflow monitoring task, used to trace the inducing points
+    states at every step during optimization.
+
+    Args:
+        model (gpflow.models.sgpr): GPflow GP/SGP model
+    '''
+    def __init__(self, model):
+        super().__init__()
+        self.trace = []
+        self.model = model
+
+    def run(self, **kwargs):
+        '''
+        Method used to extract the inducing points and
+        apply IPP fixed points transform if available
+        '''
+        Xu = self.model.inducing_variable.Z
+        Xu_exp = self.model.transform.expand(Xu,
+                                             expand_sensor_model=False).numpy()
+        self.trace.append(Xu_exp)
+
+    def get_trace(self):
+        '''
+        Returns the inducing points collected at each optimization step
+
+        Returns:
+            trace (ndarray): (n, m, d); Array with the inducing points.
+                             `n` is the number of optimization steps;
+                             `m` is the number of inducing points;
+                             `d` is the dimension of the inducing points.
+        '''
+        return np.array(self.trace)
+
+
 def optimize_model(model,
                    max_steps=2000,
                    kernel_grad=True,
@@ -102,21 +140,22 @@
    Trains a GP/SGP model
 
    Args:
-        model (gpflow.models): GPflow GP/SGP model to train
-        max_steps (int): Maximum number of training steps
-        kernel_grad (bool): If False, the kernel parameters will not be optimized
-        lr (float): Optimization learning rate
-        optimizer (str): Optimizer to use for training (`scipy` or `tf`)
+        model (gpflow.models): GPflow GP/SGP model to train.
+        max_steps (int): Maximum number of training steps.
+        kernel_grad (bool): If `False`, the kernel parameters will not be optimized.
+                            Ignored when `trainable_variables` are passed.
+        lr (float): Optimization learning rate.
+        optimizer (str): Optimizer to use for training (`scipy` or `tf`).
        method (str): Optimization method refer to [scipy minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize)
                      and [tf optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers) for full list
-        verbose (bool): If true, the training progress will be printed
+        verbose (bool): If `True`, the training progress will be printed when using Scipy.
        trace_fn (str): Function to trace metrics during training.
-                        If `None`, the loss values are traced;
-                        if `traceXu`, it the inducing points states at each optimization step are traced
-        convergence_criterion (bool): It True, enables early stopping when the loss plateaus
-        trainable_variables (list): List of model variables to train
-                                    (can be used to limit training to a subset of variables)
-        tol (float): Convergence tolerance to decide when to stop optimization
+                        If `None`, the loss values are returned;
+                        If `traceXu`, it the inducing points states at each optimization step are returned (increases computation time).
+        convergence_criterion (bool): If `True` and using a tensorflow optimizer, it
+                                      enables early stopping when the loss plateaus.
+        trainable_variables (list): List of model variables to train.
+        tol (float): Convergence tolerance to decide when to stop optimization.
    """
    # Train all variables if trainable_variables are not provided
    # If kernel_gradient is False, disable the kernel parameter gradient updates
@@ -128,13 +167,24 @@
    if optimizer == 'scipy':
        if method is None:
            method = 'L-BFGS-B'
+
+        if trace_fn == 'traceXu':
+            execute_task = TraceInducingPts(model)
+            task_group = gpflow.monitor.MonitorTaskGroup(execute_task,
+                                                         period=1)
+            trace_fn = gpflow.monitor.Monitor(task_group)
+
        opt = gpflow.optimizers.Scipy()
        losses = opt.minimize(model.training_loss,
                              trainable_variables,
                              method=method,
                              options=dict(disp=verbose, maxiter=max_steps),
-                              tol=tol)
-        losses = losses.fun
+                              tol=tol,
+                              step_callback=trace_fn)
+        if trace_fn is None:
+            losses = losses.fun
+        else:
+            losses = trace_fn.task_groups[0].tasks[0].get_trace()
    else:
        if trace_fn is None:
            trace_fn = lambda x: x.loss
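A hedged sketch of the new inducing-point tracing path through `optimize_model`; `sgpr` is assumed to be an SGP-style model that exposes the `transform` attribute used by `TraceInducingPts.run`:

```python
# Sketch only: `sgpr` is a placeholder for a GPflow SGPR-style model with a
# `transform` attribute, as required by TraceInducingPts.
from sgptools.utils.gpflow import optimize_model

# Default behaviour on the Scipy path: the loss values are returned
losses = optimize_model(sgpr, max_steps=500, optimizer='scipy')

# With trace_fn='traceXu' a gpflow.monitor callback is attached and an
# (n_steps, m, d) array of inducing points is returned instead of the losses
xu_trace = optimize_model(sgpr, max_steps=500, optimizer='scipy',
                          trace_fn='traceXu')
```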
sgptools/utils/metrics.py CHANGED
@@ -131,12 +131,12 @@ def get_rmse(y_pred, y_test):
    """
    return np.sqrt(np.mean(np.square(y_pred - y_test)))
 
-def get_reconstruction(Xu, X_test, noise_variance, kernel):
+def get_reconstruction(sensor_data, X_test, noise_variance, kernel):
    """Computes the GP-based data field estimates with the solution placements as the training set
 
    Args:
-        Xu (tuple): (ndarray (m, d); ndarray (m, 1)); Sensing locations' input
-                    and corresponding ground truth labels
+        sensor_data (ndarray tuple): ((m, d), (m, 1)); Sensing locations' input
+                                     and corresponding ground truth labels
        X_test (ndarray): (n, d); Testing data input locations
        noise_variance (float): data variance
        kernel (gpflow.kernels.Kernel): gpflow kernel function
@@ -145,7 +145,7 @@ def get_reconstruction(Xu, X_test, noise_variance, kernel):
        y_pred (ndarray): (n, 1); Predicted data field estimates
        y_var (ndarray): (n, 1); Prediction variance at each location in the data field
    """
-    Xu_X, Xu_y = Xu
+    Xu_X, Xu_y = sensor_data
 
    # Get the GP predictions
    gpr = gpflow.models.GPR((Xu_X, Xu_y),
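The only API change here is the renamed first argument. A hedged usage sketch (all variables are placeholders):

```python
# Sketch only: X_sensing, y_sensing, X_test, noise_variance, and kernel are placeholders.
from sgptools.utils.metrics import get_reconstruction

y_pred, y_var = get_reconstruction((X_sensing, y_sensing),  # sensor_data tuple
                                   X_test,
                                   noise_variance,
                                   kernel)
```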
sgptools/utils/misc.py CHANGED
@@ -51,7 +51,10 @@ def cont2disc(Xu, candidates, candidate_labels=None):
    """
    # Sanity check to ensure that there are sensing locations and candidates to match
    if len(candidates)==0 or len(Xu)==0:
-        return []
+        if candidate_labels is not None:
+            return [], []
+        else:
+            return []
 
    dists = pairwise_distances(candidates, Y=Xu, metric='euclidean')
    row_ind, _ = linear_sum_assignment(dists)
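A hedged sketch of the fixed empty-input behaviour shown in the hunk above (arguments are placeholders):

```python
# Sketch only: `candidates` and `candidate_labels` are placeholders.
from sgptools.utils.misc import cont2disc

cont2disc([], candidates)                    # -> []
cont2disc([], candidates, candidate_labels)  # -> ([], []) instead of a bare []
```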
@@ -133,4 +136,4 @@ def project_waypoints(waypoints, candidates):
    """
    waypoints_disc = cont2disc(waypoints, candidates)
    waypoints_valid = _reoder_path(waypoints, waypoints_disc)
-    return waypoints_valid
+    return waypoints_valid
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sgptools
-Version: 1.0.5
+Version: 1.0.6
 Summary: Software Suite for Sensor Placement and Informative Path Planning
 Home-page: https://www.itskalvik.com/sgp-tools
 Author: Kalvik
@@ -11,15 +11,15 @@ sgptools/models/core/__init__.py,sha256=TlUdvrM0A7vSzc5IM8C2Y2kliB1ip7YLEcHHzvuw
 sgptools/models/core/augmented_gpr.py,sha256=NuYwlggz7ho7pvW4-so3ghos5vZ8oK7nRZqvHpAt0Zk,3497
 sgptools/models/core/augmented_sgpr.py,sha256=qMP9J4AnOUx9AEZfaPhoyb3RP_2AOhOUCUY4eh7uOi0,7185
 sgptools/models/core/osgpr.py,sha256=gqliUdXdnt3fea206LP0rqGIggmIdKh8WP2DtFWzdBw,11798
-sgptools/models/core/transformations.py,sha256=NTj3yZjTQXTHBr24XFI_W5dh2-TVginisEiT6LPeMeg,16742
+sgptools/models/core/transformations.py,sha256=avluSP7g4EWjijlpFQYobO_CHKA0-V7PemC6SdVwrG8,18976
 sgptools/utils/__init__.py,sha256=jgWqzSDgUbqOTFo8mkqZaTlyz44l3v2XYPJfcHYHjqM,376
-sgptools/utils/data.py,sha256=5HX4YwxZX4Q1J6UekB9GARMVhjsF806kTh2eRCrU79I,6978
-sgptools/utils/gpflow.py,sha256=bnrtKl9tvKVh1Wz969KQJQf8UtApa7i0SkOj-KMOWNc,6816
-sgptools/utils/metrics.py,sha256=0tyMPoZgA-dRNs-Ye69paMNFpWgLuLNE1t8ywOnRmaE,5806
-sgptools/utils/misc.py,sha256=W8lx6n1eJeeiq6cyRGzhZMCnWItoiQzODHQkqVAQi4U,5322
+sgptools/utils/data.py,sha256=oTXq4oRuzJdXpZC6frUfja8jhwy_ZdDDi7L1BYZcdQs,7309
+sgptools/utils/gpflow.py,sha256=LnFYufnMW4ch7qsKnru53QUxEtIzJqE822qj6w8ssRg,8576
+sgptools/utils/metrics.py,sha256=tu8H129n8GuxV5fQIKLcfzPUxd7sp8zEF9qZBOZjNKo,5834
+sgptools/utils/misc.py,sha256=_6hZRoSnbnKCt9H3XRlPT74Wh5VO5zArFddE1dszHGo,5408
 sgptools/utils/tsp.py,sha256=RJAQ4_uE7CUtR1ei3nSnGy-1kNhw82E9P_HyaCkc4iI,7007
-sgptools-1.0.5.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-sgptools-1.0.5.dist-info/METADATA,sha256=GqxkIaxPKoQjZhyU2505xFvNs5xqBu40Ncah6BWFDm4,944
-sgptools-1.0.5.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
-sgptools-1.0.5.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
-sgptools-1.0.5.dist-info/RECORD,,
+sgptools-1.0.6.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+sgptools-1.0.6.dist-info/METADATA,sha256=fdUW1EPBQkGG-rKXi6xVa8i4Qt5gIwKDr_8MPsI-Lh4,944
+sgptools-1.0.6.dist-info/WHEEL,sha256=ixB2d4u7mugx_bCBycvM9OzZ5yD7NmPXFRtKlORZS2Y,91
+sgptools-1.0.6.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
+sgptools-1.0.6.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (73.0.1)
+Generator: setuptools (74.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 