sgptools 1.0.6__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sgptools/__init__.py CHANGED
@@ -12,7 +12,7 @@ The library includes python code for the following:
 
  """
 
- __version__ = "1.0.1"
+ __version__ = "1.1.0"
  __author__ = 'Kalvik'
 
  from .models.core import *
sgptools/models/bo.py CHANGED
@@ -29,67 +29,90 @@ class BayesianOpt:
          X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
          noise_variance (float): data variance
          kernel (gpflow.kernels.Kernel): gpflow kernel function
+         transform (Transform): Transform object
      """
-     def __init__(self, X_train, noise_variance, kernel):
+     def __init__(self, X_train, noise_variance, kernel,
+                  transform=None):
          self.X_train = X_train
          self.noise_variance = noise_variance
          self.kernel = kernel
          self.num_dim = X_train.shape[-1]
-
+         self.transform = transform
+
          # use the boundaries of the region as the search space
          self.pbounds_dim = []
          for i in range(self.num_dim):
              self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))
-
+
      def objective(self, **kwargs):
-         """Computes the objective function (mutual information) for the sensor placement problem
+         """Objective function (GP-based Mutual Information)
+
+         Args:
+             x<i> (ndarray): (1, d); Current solution sensor placement location i
          """
+         # MI does not depend on waypoint order (reshape to -1, num_dim)
          X = []
          for i in range(len(kwargs)):
              X.append(kwargs['x{}'.format(i)])
          X = np.array(X).reshape(-1, self.num_dim)
-         return -get_mi(X, self.noise_variance, self.kernel, self.X_train)
-
+         if self.transform is not None:
+             X = self.transform.expand(X)
+             constraints_loss = self.transform.constraints(X)
+
+         try:
+             mi = get_mi(X, self.X_train, self.noise_variance, self.kernel)
+             mi += constraints_loss
+             mi = mi.numpy()
+         except:
+             mi = -1e4  # if the cholskey decomposition fails
+         return mi
+
      def optimize(self,
                   num_sensors=10,
                   max_steps=100,
                   X_init=None,
-                  init_points=10):
+                  init_points=10,
+                  verbose=0,
+                  seed=1234):
          """Optimizes the sensor placements using Bayesian Optimization without any constraints
 
          Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             X_init (ndarray): (m, d); Initial inducing points
-             init_points (int): How many steps of random exploration you want to perform.
+             num_sensors (int): Number of sensor locations to optimize.
+             max_steps (int): Maximum number of optimization steps.
+             X_init (ndarray): (m, d); Initial inducing points.
+             init_points (int): Number of random solutions used for initial exploration.
                                 Random exploration can help by diversifying the exploration space.
+             verbose (int): The level of verbosity.
+             seed (int): The algorithm will use it to seed the randomnumber generator, ensuring replicability.
 
          Returns:
              Xu (ndarray): (m, d); Solution sensor placement locations
          """
          if X_init is None:
              X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+         else:
+             num_sensors = len(X_init.reshape(-1, self.num_dim))
          X_init = X_init.reshape(-1)
 
          pbounds = {}
          for i in range(self.num_dim*num_sensors):
              pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]
 
-         optimizer = BayesianOptimization(
-             f=self.objective,
-             pbounds=pbounds,
-             verbose=0,
-             random_state=1,
-             allow_duplicate_points=True
-         )
-
-         optimizer.maximize(
-             init_points=init_points,
-             n_iter=max_steps,
-         )
+         optimizer = BayesianOptimization(f=self.objective,
+                                          pbounds=pbounds,
+                                          verbose=verbose,
+                                          random_state=seed,
+                                          allow_duplicate_points=True)
+         optimizer.maximize(init_points=init_points,
+                            n_iter=max_steps)
 
          sol = []
          for i in range(self.num_dim*num_sensors):
              sol.append(optimizer.max['params']['x{}'.format(i)])
-         return np.array(sol).reshape(-1, self.num_dim)
-
+         sol = np.array(sol).reshape(-1, self.num_dim)
+         if self.transform is not None:
+             sol = self.transform.expand(sol,
+                                         expand_sensor_model=False)
+         if not isinstance(sol, np.ndarray):
+             sol = sol.numpy()
+         return sol.reshape(-1, self.num_dim)
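For orientation, here is a minimal usage sketch of the updated BayesianOpt API with the new transform, verbose, and seed arguments; the random 2D candidate set and RBF kernel below are illustrative assumptions, not part of the package diff:

    import numpy as np
    import gpflow
    from sgptools.models.bo import BayesianOpt

    # Illustrative 2D candidate locations approximating the monitoring region
    X_train = np.random.uniform(0, 10, size=(500, 2))

    bo = BayesianOpt(X_train,
                     noise_variance=1e-4,
                     kernel=gpflow.kernels.RBF(lengthscales=1.0),
                     transform=None)     # new optional Transform argument
    placements = bo.optimize(num_sensors=5,
                             max_steps=50,
                             verbose=0,   # new argument
                             seed=1234)   # new argument
    print(placements.shape)              # (5, 2) sensor placement locations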
sgptools/models/cma_es.py CHANGED
@@ -57,48 +57,24 @@ class CMA_ES:
          """
          self.noise_variance = noise_variance
          self.kernel = kernel
-
-     def constraint(self, X):
-         """Constraint function for the optimization problem (constraint to limit the boundary of the region)
-         Does not work well with CMA-ES as it is a step function and is not continuous
-
-         Args:
-             X (ndarray): (n, d); Current sensor placement locations
-         """
-         X = np.array(X).reshape(-1, self.num_dim)
-         lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]
-         lagrangian = np.logical_not(lagrangian).astype(float)
-         return lagrangian
-
-     def distance_constraint(self, X):
-         """Constraint function for the optimization problem (constraint to limit the total travel distance)
-         Does not work well with CMA-ES as it is a step function and is not continuous
-
-         Args:
-             X (ndarray): (n, d); Current sensor placement locations
-         """
-         X = np.array(X).reshape(self.num_robots, -1, self.num_dim)
-         dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)
-         lagrangian = dists - self.distance_budget
-         lagrangian_mask = np.logical_not(lagrangian <= 0)
-         lagrangian[lagrangian_mask] = 0
-         lagrangian = np.sum(lagrangian)
-         return lagrangian
 
      def objective(self, X):
          """Objective function (GP-based Mutual Information)
 
          Args:
-             X (ndarray): (n, d); Initial sensor placement locations
+             X (ndarray): (n, d); Current solution sensor placement locations
          """
          # MI does not depend on waypoint order (reshape to -1, num_dim)
          X = np.array(X).reshape(-1, self.num_dim)
+         constraints_loss = 0.0
          if self.transform is not None:
-             X = self.transform.expand(X,
-                                       expand_sensor_model=False).numpy()
+             X = self.transform.expand(X)
+             constraints_loss = self.transform.constraints(X)
 
          try:
-             mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+             mi = -get_mi(X, self.X_train, self.noise_variance, self.kernel)
+             mi -= constraints_loss
+             mi = mi.numpy()
          except:
              mi = 0.0 # if the cholskey decomposition fails
          return mi
@@ -106,16 +82,20 @@ class CMA_ES:
      def optimize(self,
                   num_sensors=10,
                   max_steps=5000,
-                  tol=1e-11,
-                  X_init=None):
-         """Optimizes the SP objective function using CMA-ES without any constraints
+                  tol=1e-6,
+                  X_init=None,
+                  verbose=0,
+                  seed=1234):
+         """Optimizes the sensor placements using CMA-ES without any constraints
 
          Args:
              num_sensors (int): Number of sensor locations to optimize
              max_steps (int): Maximum number of optimization steps
              tol (float): Convergence tolerance to decide when to stop optimization
              X_init (ndarray): (m, d); Initial inducing points
-
+             verbose (int): The level of verbosity.
+             seed (int): The algorithm will use it to seed the randomnumber generator, ensuring replicability.
+
          Returns:
              Xu (ndarray): (m, d); Solution sensor placement locations
          """
@@ -127,63 +107,15 @@ class CMA_ES:
 
          xopt, _ = cma.fmin2(self.objective, X_init, sigma0,
                              options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
+                                      'verb_disp': verbose,
                                       'tolfun': tol,
-                                      'seed': 1234},
+                                      'seed': seed},
                              restarts=5)
 
          xopt = np.array(xopt).reshape(-1, self.num_dim)
          if self.transform is not None:
              xopt = self.transform.expand(xopt,
-                                          expand_sensor_model=False).numpy()
-
-         return xopt.reshape(-1, self.num_dim)
-
-     def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-         """Optimizes the SP objective function using CMA-ES with a distance budget constraint
-
-         Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             tol (float): Convergence tolerance to decide when to stop optimization
-
-         Returns:
-             Xu (ndarray): (m, d); Solution sensor placement locations
-         """
-         sigma0 = 1.0
-         idx = np.random.randint(len(self.X_train), size=num_sensors)
-         x_init = self.X_train[idx].reshape(-1)
-         cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)
-         xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                             options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
-                                      'tolfun': tol,
-                                      'seed': 1234},
-                             callback=cfun.update,
-                             restarts=5)
-         return xopt.reshape(-1, self.num_dim)
-
-     def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
-         """Optimizes the SP objective function using CMA-ES with the constraints
-         to ensure that the sensors are placed within the boundaries of the region
-
-         Args:
-             num_sensors (int): Number of sensor locations to optimize
-             max_steps (int): Maximum number of optimization steps
-             tol (float): Convergence tolerance to decide when to stop optimization
-
-         Returns:
-             Xu (ndarray): (m, d); Solution sensor placement locations
-         """
-         sigma0 = 1.0
-         idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)
-         x_init = self.X_train[idx].reshape(-1)
-         cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)
-         xopt, _ = cma.fmin2(cfun, x_init, sigma0,
-                             options={'maxfevals': max_steps,
-                                      'verb_disp': 0,
-                                      'tolfun': tol,
-                                      'seed': 1234},
-                             callback=cfun.update,
-                             restarts=5)
-         return xopt.reshape(-1, self.num_dim)
+                                          expand_sensor_model=False)
+         if not isinstance(xopt, np.ndarray):
+             xopt = xopt.numpy()
+         return xopt.reshape(-1, self.num_dim)
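The removed doptimize and coptimize variants are superseded by the constraints term that objective() now obtains from the Transform object. Below is a hedged sketch of the remaining optimize() entry point; the constructor arguments are assumed from the attributes referenced in these hunks (the full signature is not shown in this diff), and the candidate set and kernel are illustrative:

    import numpy as np
    import gpflow
    from sgptools.models.cma_es import CMA_ES

    X_train = np.random.uniform(0, 10, size=(500, 2))  # illustrative candidate set

    # Assumed constructor form: CMA_ES(X_train, noise_variance, kernel, transform=None)
    cma_opt = CMA_ES(X_train,
                     noise_variance=1e-4,
                     kernel=gpflow.kernels.RBF())
    sol = cma_opt.optimize(num_sensors=5,
                           max_steps=2000,
                           tol=1e-6,    # new default shown above
                           verbose=0,   # new argument
                           seed=1234)   # new argument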
sgptools/models/core/transformations.py CHANGED
@@ -159,7 +159,6 @@ class IPPTransform(Transform):
          if self.Xu_fixed is not None:
              self.Xu_fixed.assign(Xu_fixed)
          else:
-             # ToDo: Use binary mask of fixed size to avoid retracing
              self.Xu_fixed = tf.Variable(Xu_fixed,
                                          shape=tf.TensorShape(None),
                                          trainable=False)
@@ -325,24 +324,9 @@ class SquareTransform(Transform):
                                             self.num_side, axis=1))
          xy = tf.concat(points, axis=1)
          xy = tf.transpose(xy, [2, 1, 0])
-         xy = self._reshape(xy)
+         xy = tf.reshape(xy, (-1, 2))
          return xy
 
-     def _reshape(self, X):
-         """Reorder the inducing points to be in the correct order for aggregation with square FoV.
-
-         Args:
-             X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each
-                          inducing point is mapped to in order to form the FoV.
-
-         Returns:
-             Xu (ndarray): (mp, 2); Reorder inducing points
-         """
-         X = tf.reshape(X, (-1, self.num_side, self.num_side, 2))
-         X = tf.transpose(X, (1, 0, 2, 3))
-         X = tf.reshape(X, (-1, 2))
-         return X
-
      def distance(self, Xu):
          """Computes the distance incured by sequentially visiting the inducing points
          Args:
sgptools/models/greedy_mi.py CHANGED
@@ -12,8 +12,8 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- from .core.augmented_gpr import AugmentedGPR
  from apricot import CustomSelection
+ from gpflow.models.gpr import GPR
  import numpy as np
 
 
@@ -32,15 +32,24 @@ class GreedyMI:
          kernel (gpflow.kernels.Kernel): gpflow kernel function
          transform (Transform): Transform object
      """
-     def __init__(self, S, V, noise_variance, kernel, transform=None):
+     def __init__(self, S, V, noise_variance, kernel,
+                  transform=None):
          self.S = S
          self.V = V
          self.kernel = kernel
          self.input_dim = S.shape[1]
          self.noise_variance = noise_variance
          self.transform = transform
-
+
      def mutual_info(self, x):
+         """Computes mutual information using the points `x`
+
+         Args:
+             x (ndarray): (n); Indices of the solution placement locations
+
+         Returns:
+             MI (float): Mutual information between the placement x and candidate locations
+         """
          x = np.array(x).reshape(-1).astype(int)
          A = self.S[x[:-1]].reshape(-1, self.input_dim)
          y = self.S[x[-1]].reshape(-1, self.input_dim)
@@ -50,26 +59,27 @@ class GreedyMI:
          else:
              if self.transform is not None:
                  A = self.transform.expand(A)
-             a_gp = AugmentedGPR(data=(A, np.zeros((len(A), 1))),
-                                 kernel=self.kernel,
-                                 noise_variance=self.noise_variance,
-                                 transform=self.transform)
-             _, sigma_a = a_gp.predict_f(y, aggregate_train=True)
+             a_gp = GPR(data=(A, np.zeros((len(A), 1))),
+                        kernel=self.kernel,
+                        noise_variance=self.noise_variance)
+             _, sigma_a = a_gp.predict_f(y)
 
-         # Remove locations in A to build A bar
+         # Remove locations in A∪y from V to build A bar (Refer to Krause et al., 2008)
          V_ = self.V.copy()
          V_rows = V_.view([('', V_.dtype)] * V_.shape[1])
+
          if self.transform is not None:
-             A_ = self.transform.expand(self.S[x]).numpy()
+             solution = self.S[x].reshape(-1, self.input_dim)
+             A_ = self.transform.expand(solution)
          else:
              A_ = self.S[x]
          A_rows = A_.view([('', V_.dtype)] * A_.shape[1])
+
          V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])
 
-         self.v_gp = AugmentedGPR(data=(V_, np.zeros((len(V_), 1))),
-                                  kernel=self.kernel,
-                                  noise_variance=self.noise_variance,
-                                  transform=self.transform)
+         self.v_gp = GPR(data=(V_, np.zeros((len(V_), 1))),
+                         kernel=self.kernel,
+                         noise_variance=self.noise_variance)
          _, sigma_v = self.v_gp.predict_f(y)
 
          return (sigma_a/sigma_v).numpy().squeeze()
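For context, a small sketch of how the reworked GreedyMI.mutual_info scores a set of candidate indices; the candidate grid and kernel are illustrative, and the hand-written greedy loop is a simplified stand-in for the apricot CustomSelection driver imported at the top of this file:

    import numpy as np
    import gpflow
    from sgptools.models.greedy_mi import GreedyMI

    S = np.random.uniform(0, 10, size=(100, 2))  # illustrative candidate placement locations
    V = S.copy()                                 # locations approximating the monitoring region
    gmi = GreedyMI(S, V, noise_variance=1e-4, kernel=gpflow.kernels.RBF())

    # Naive greedy selection: repeatedly add the index with the highest MI score
    selected = [0]  # seed with an arbitrary first index for this sketch
    for _ in range(4):
        remaining = [i for i in range(len(S)) if i not in selected]
        gains = [gmi.mutual_info(selected + [i]) for i in remaining]
        selected.append(remaining[int(np.argmax(gains))])
    print(S[selected])  # greedily chosen placement locations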
sgptools/models/greedy_sgp.py CHANGED
@@ -30,20 +30,16 @@ class GreedySGP:
          V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
          noise_variance (float): Data noise variance
          kernel (gpflow.kernels.Kernel): gpflow kernel function
-         Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always
-                             added to the inducing points set during loss function computation
          transform (Transform): Transform object
      """
      def __init__(self, num_inducing, S, V, noise_variance, kernel,
-                  Xu_fixed=None,
                   transform=None):
-         self.gp = AugmentedSGPR((V, np.zeros((len(V), 1))),
-                                 noise_variance=noise_variance,
-                                 kernel=kernel,
-                                 inducing_variable=S[:num_inducing],
-                                 transform=transform)
+         self.sgp = AugmentedSGPR((V, np.zeros((len(V), 1))),
+                                  noise_variance=noise_variance,
+                                  kernel=kernel,
+                                  inducing_variable=S[:num_inducing],
+                                  transform=transform)
          self.locs = S
-         self.Xu_fixed = Xu_fixed
          self.num_inducing = num_inducing
          self.inducing_dim = S.shape[1]
 
@@ -51,21 +47,25 @@ class GreedySGP:
          """Computes the SGP's optimization bound using the inducing points `x`
 
          Args:
-             x (ndarray): (n, d); Inducing points
+             x (ndarray): (n); Indices of the solution placement locations
 
          Returns:
              elbo (float): Evidence lower bound/SGP's optimization bound value
          """
          x = np.array(x).reshape(-1).astype(int)
          Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)
+
+         # Initialize all inducing points at the first solution placement location.
+         # Ensures that the number of inducing points is always fixed and no additional
+         # information is passed to the SGP
          Xu *= self.locs[x][0]
-         Xu[-len(x):] = self.locs[x]
 
-         if self.Xu_fixed is not None:
-             Xu[:len(self.Xu_fixed)] = self.Xu_fixed
+         # Copy all given solution placements to the inducing points set
+         Xu[-len(x):] = self.locs[x]
 
-         self.gp.inducing_variable.Z.assign(Xu)
-         return self.gp.elbo().numpy()
+         # Update the SGP inducing points
+         self.sgp.inducing_variable.Z.assign(Xu)
+         return self.sgp.elbo().numpy() # return the ELBO
 
 
  def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel,
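Similarly, a hedged sketch of the ELBO-based scoring after the Xu_fixed removal; the method shown above is assumed to be GreedySGP.bound, the candidate set and kernel are illustrative, and in the package this bound is driven by get_greedy_sgp_sol rather than called by hand:

    import numpy as np
    import gpflow
    from sgptools.models.greedy_sgp import GreedySGP

    candidates = np.random.uniform(0, 10, size=(100, 2))  # illustrative candidate locations
    gsgp = GreedySGP(num_inducing=5,
                     S=candidates,
                     V=candidates,
                     noise_variance=1e-4,
                     kernel=gpflow.kernels.RBF())

    # ELBO of a candidate index set; unused inducing slots are pinned to the first
    # selected location, as described in the comments added above
    print(gsgp.bound([0, 10, 20]))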
sgptools/utils/misc.py CHANGED
@@ -86,6 +86,7 @@ def plot_paths(paths, candidates=None, title=None):
      plt.legend(bbox_to_anchor=(1.0, 1.02))
      if title is not None:
          plt.title(title)
+     plt.gca().set_aspect('equal')
      plt.xlabel('X')
      plt.ylabel('Y')
 
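The added set_aspect('equal') call keeps plotted paths geometrically faithful by forcing equal scaling on both axes. A quick hedged example of calling plot_paths, assuming paths is an iterable of (n, 2) waypoint arrays, one per robot, which is consistent with the signature shown above:

    import numpy as np
    from sgptools.utils.misc import plot_paths

    path = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 2.5], [4.0, 0.5]])  # assumed (n, 2) waypoints
    candidates = np.random.uniform(0, 4, size=(200, 2))
    plot_paths([path], candidates=candidates, title='Example path')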
sgptools-1.0.6.dist-info/METADATA → sgptools-1.1.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sgptools
- Version: 1.0.6
+ Version: 1.1.1
  Summary: Software Suite for Sensor Placement and Informative Path Planning
  Home-page: https://www.itskalvik.com/sgp-tools
  Author: Kalvik
@@ -13,18 +13,18 @@ Requires-Dist: matplotlib
  Requires-Dist: pandas
  Requires-Dist: scikit-learn
  Requires-Dist: scipy
- Requires-Dist: numpy
+ Requires-Dist: numpy<2.0.0
  Requires-Dist: ortools
  Requires-Dist: scikit-image
  Requires-Dist: shapely
  Requires-Dist: cma
  Requires-Dist: bayesian-optimization
- Requires-Dist: hkb-diamondsquare
- Requires-Dist: tensorflow-probability[tf] >=0.21.0
- Requires-Dist: typing-extensions
- Requires-Dist: gpflow >=2.7.0
+ Requires-Dist: hkb_diamondsquare
+ Requires-Dist: tensorflow-probability[tf]>=0.21.0
+ Requires-Dist: tensorflow>=2.13.0; platform_machine != "arm64"
+ Requires-Dist: tensorflow-aarch64>=2.13.0; platform_machine == "arm64"
+ Requires-Dist: typing_extensions
+ Requires-Dist: gpflow>=2.7.0
  Requires-Dist: pillow
- Requires-Dist: tensorflow >=2.13.0 ; platform_machine != "arm64"
- Requires-Dist: tensorflow-aarch64 >=2.13.0 ; platform_machine == "arm64"
 
  Software Suite for Sensor Placement and Informative Path Planning
sgptools-1.0.6.dist-info/RECORD → sgptools-1.1.1.dist-info/RECORD CHANGED
@@ -1,25 +1,25 @@
- sgptools/__init__.py,sha256=J4jZOdSzuUtaabg4TtYxxWfQwZkA0E_bVK3aaBPgPAc,449
+ sgptools/__init__.py,sha256=pC3DPZgQ3mDxhgiKB8E9pyfFlxkeXmmwZCyHSY9T_a4,449
  sgptools/kernels/__init__.py,sha256=zRf4y-wJwjXKt1uOnmI5MbzCA6pRlyA7C-eagLfb3d0,190
  sgptools/kernels/neural_kernel.py,sha256=9XEjcwwi1Gwj4D5cAZwq5QdWqMaI-Vu2DKgYO58DmPg,6709
  sgptools/models/__init__.py,sha256=X2lIg9kf1-2MHUswk-VW2dHHcbSLxf6_IuV7lc_kvDc,682
- sgptools/models/bo.py,sha256=RkcbD0t2d1gkZf7SMtMyymlYne9sFEaA4pjmgQhlTWY,3640
- sgptools/models/cma_es.py,sha256=URLndW_lb9zQj6XrxO6tnm9U6xld8jgsrqsg-Oo5vyw,8006
+ sgptools/models/bo.py,sha256=sjs18oRXL-yoNiLoaaoROjaJXqfj_CwouJPe9HgzjL0,4857
+ sgptools/models/cma_es.py,sha256=LjWRcUIcARcFvAHR2F8prPDmgxLzYI0kRwYXzKp3APc,4861
  sgptools/models/continuous_sgp.py,sha256=USf9fG1Pl-rQfcD_ffP6mB4mrh0F9RMUA7Lfhw9rc48,2940
- sgptools/models/greedy_mi.py,sha256=LJVXKHC58iJs1CyfAioAjCeK2o5hiViTchFxuiKj73o,4870
- sgptools/models/greedy_sgp.py,sha256=jgQexdEZ_z88iD6sM_D0NhJvBodQ9zindh9L7117q_8,4462
+ sgptools/models/greedy_mi.py,sha256=06CY6tm9C3iBYEG_DOuQKDmWIww9Ah0rkeJUXsCR2YU,5018
+ sgptools/models/greedy_sgp.py,sha256=giddMbU3ohePTdLTcH4fDx-bS9upq1T_K8KUW_Ag6HI,4490
  sgptools/models/core/__init__.py,sha256=TlUdvrM0A7vSzc5IM8C2Y2kliB1ip7YLEcHHzvuw-C4,482
  sgptools/models/core/augmented_gpr.py,sha256=NuYwlggz7ho7pvW4-so3ghos5vZ8oK7nRZqvHpAt0Zk,3497
  sgptools/models/core/augmented_sgpr.py,sha256=qMP9J4AnOUx9AEZfaPhoyb3RP_2AOhOUCUY4eh7uOi0,7185
  sgptools/models/core/osgpr.py,sha256=gqliUdXdnt3fea206LP0rqGIggmIdKh8WP2DtFWzdBw,11798
- sgptools/models/core/transformations.py,sha256=avluSP7g4EWjijlpFQYobO_CHKA0-V7PemC6SdVwrG8,18976
+ sgptools/models/core/transformations.py,sha256=X7WEKo_lFAYB5HKnFvxFsxfz6CB-jzPfVWcx1sWe2lI,18313
  sgptools/utils/__init__.py,sha256=jgWqzSDgUbqOTFo8mkqZaTlyz44l3v2XYPJfcHYHjqM,376
  sgptools/utils/data.py,sha256=oTXq4oRuzJdXpZC6frUfja8jhwy_ZdDDi7L1BYZcdQs,7309
  sgptools/utils/gpflow.py,sha256=LnFYufnMW4ch7qsKnru53QUxEtIzJqE822qj6w8ssRg,8576
  sgptools/utils/metrics.py,sha256=tu8H129n8GuxV5fQIKLcfzPUxd7sp8zEF9qZBOZjNKo,5834
- sgptools/utils/misc.py,sha256=_6hZRoSnbnKCt9H3XRlPT74Wh5VO5zArFddE1dszHGo,5408
+ sgptools/utils/misc.py,sha256=LdAFJS7-xubWpRnrgdLOorCa9vB_8vRrvL5cahxHYNA,5442
  sgptools/utils/tsp.py,sha256=RJAQ4_uE7CUtR1ei3nSnGy-1kNhw82E9P_HyaCkc4iI,7007
- sgptools-1.0.6.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- sgptools-1.0.6.dist-info/METADATA,sha256=fdUW1EPBQkGG-rKXi6xVa8i4Qt5gIwKDr_8MPsI-Lh4,944
- sgptools-1.0.6.dist-info/WHEEL,sha256=ixB2d4u7mugx_bCBycvM9OzZ5yD7NmPXFRtKlORZS2Y,91
- sgptools-1.0.6.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
- sgptools-1.0.6.dist-info/RECORD,,
+ sgptools-1.1.1.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ sgptools-1.1.1.dist-info/METADATA,sha256=wGH0O-OJRPgT6AjtTqMQnbae8Zvhu6mqD3_bU3SHRUs,944
+ sgptools-1.1.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ sgptools-1.1.1.dist-info/top_level.txt,sha256=2NWH6uQLAOuLB9fG7o1pqf6Jvpe1_hEcuqfSqtUw3gw,9
+ sgptools-1.1.1.dist-info/RECORD,,
sgptools-1.0.6.dist-info/WHEEL → sgptools-1.1.1.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (74.1.0)
+ Generator: setuptools (75.6.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 