pyRDDLGym-jax 0.4-py3-none-any.whl → 0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,7 +9,7 @@ import warnings
  warnings.filterwarnings("ignore")

  from bayes_opt import BayesianOptimization
- from bayes_opt.util import UtilityFunction
+ from bayes_opt.acquisition import AcquisitionFunction, UpperConfidenceBound
  import jax
  import numpy as np

@@ -26,7 +26,6 @@ from pyRDDLGym_jax.core.planner import (

  Kwargs = Dict[str, Any]

-
  # ===============================================================================
  #
  # GENERIC TUNING MODULE
@@ -37,6 +36,9 @@ Kwargs = Dict[str, Any]
  # 3. deep reactive policies
  #
  # ===============================================================================
+ COLUMNS = ['pid', 'worker', 'iteration', 'target', 'best_target', 'acq_params']
+
+
  class JaxParameterTuning:
  '''A general-purpose class for tuning a Jax planner.'''

@@ -53,7 +55,7 @@ class JaxParameterTuning:
  num_workers: int=1,
  poll_frequency: float=0.2,
  gp_iters: int=25,
- acquisition: Optional[UtilityFunction]=None,
+ acquisition: Optional[AcquisitionFunction]=None,
  gp_init_kwargs: Optional[Kwargs]=None,
  gp_params: Optional[Kwargs]=None) -> None:
  '''Creates a new instance for tuning hyper-parameters for Jax planners
@@ -113,10 +115,9 @@ class JaxParameterTuning:
  self.gp_params = gp_params

  # create acquisition function
- self.acq_args = None
  if acquisition is None:
  num_samples = self.gp_iters * self.num_workers
- acquisition, self.acq_args = JaxParameterTuning._annealing_utility(num_samples)
+ acquisition = JaxParameterTuning._annealing_acquisition(num_samples)
  self.acquisition = acquisition

  def summarize_hyperparameters(self) -> None:
@@ -133,23 +134,15 @@ class JaxParameterTuning:
  f' planning_trials_per_iter ={self.eval_trials}\n'
  f' planning_iters_per_trial ={self.train_epochs}\n'
  f' planning_timeout_per_trial={self.timeout_training}\n'
- f' acquisition_fn ={type(self.acquisition).__name__}')
- if self.acq_args is not None:
- print(f'using default acquisition function:\n'
- f' utility_kind ={self.acq_args[0]}\n'
- f' initial_kappa={self.acq_args[1]}\n'
- f' kappa_decay ={self.acq_args[2]}')
+ f' acquisition_fn ={self.acquisition}')

  @staticmethod
- def _annealing_utility(n_samples, n_delay_samples=0, kappa1=10.0, kappa2=1.0):
- kappa_decay = (kappa2 / kappa1) ** (1.0 / (n_samples - n_delay_samples))
- utility_fn = UtilityFunction(
- kind='ucb',
+ def _annealing_acquisition(n_samples, n_delay_samples=0, kappa1=10.0, kappa2=1.0):
+ acq_fn = UpperConfidenceBound(
  kappa=kappa1,
- kappa_decay=kappa_decay,
- kappa_decay_delay=n_delay_samples)
- utility_args = ['ucb', kappa1, kappa_decay]
- return utility_fn, utility_args
+ exploration_decay=(kappa2 / kappa1) ** (1.0 / (n_samples - n_delay_samples)),
+ exploration_decay_delay=n_delay_samples)
+ return acq_fn

  def _pickleable_objective_with_kwargs(self):
  raise NotImplementedError
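The hunk above replaces the removed bayes_opt `UtilityFunction` with the acquisition API introduced in bayesian-optimization 2.0. A minimal sketch of the annealed UCB construction it relies on, assuming `bayesian-optimization>=2.0.0`; the helper name `annealing_ucb` and the omission of the delay argument are illustrative choices, not the package's own method:

```python
# Sketch: a UCB acquisition whose kappa anneals geometrically from kappa1
# down to kappa2 over n_samples suggestions (mirrors _annealing_acquisition).
from bayes_opt.acquisition import UpperConfidenceBound

def annealing_ucb(n_samples: int, kappa1: float = 10.0, kappa2: float = 1.0):
    # choose the decay so that kappa1 * decay ** n_samples == kappa2
    decay = (kappa2 / kappa1) ** (1.0 / n_samples)
    return UpperConfidenceBound(kappa=kappa1, exploration_decay=decay)
```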
@@ -160,7 +153,7 @@ class JaxParameterTuning:
  pid = os.getpid()
  return index, pid, params, target

- def tune(self, key: jax.random.PRNGKey,
+ def tune(self, key: jax.random.PRNGKey,
  filename: str,
  save_plot: bool=False) -> Dict[str, Any]:
  '''Tunes the hyper-parameters for Jax planner, returns the best found.'''
@@ -178,32 +171,28 @@ class JaxParameterTuning:
  for (name, hparam) in self.hyperparams_dict.items()
  }
  optimizer = BayesianOptimization(
- f=None, # probe() is not called
+ f=None,
+ acquisition_function=self.acquisition,
  pbounds=hyperparams_bounds,
  allow_duplicate_points=True, # to avoid crash
  random_state=np.random.RandomState(key),
  **self.gp_init_kwargs
  )
  optimizer.set_gp_params(**self.gp_params)
- utility = self.acquisition

  # suggest initial parameters to evaluate
  num_workers = self.num_workers
- suggested, kappas = [], []
+ suggested, acq_params = [], []
  for _ in range(num_workers):
- utility.update_params()
- probe = optimizer.suggest(utility)
- suggested.append(probe)
- kappas.append(utility.kappa)
+ probe = optimizer.suggest()
+ suggested.append(probe)
+ acq_params.append(vars(optimizer.acquisition_function))

  # clear and prepare output file
  filename = self._filename(filename, 'csv')
  with open(filename, 'w', newline='') as file:
  writer = csv.writer(file)
- writer.writerow(
- ['pid', 'worker', 'iteration', 'target', 'best_target', 'kappa'] + \
- list(hyperparams_bounds.keys())
- )
+ writer.writerow(COLUMNS + list(hyperparams_bounds.keys()))

  # start multiprocess evaluation
  worker_ids = list(range(num_workers))
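The two hunks above adapt the tuner to the bayes_opt 2.x interface: the acquisition function is passed to the `BayesianOptimization` constructor, `suggest()` no longer takes a utility argument, and evaluated targets are fed back with `register()`. A toy ask/tell sketch of that interface, assuming `bayesian-optimization>=2.0.0`; the one-dimensional objective and the bounds are illustrative only and not taken from the package:

```python
# Toy ask/tell loop against the bayes_opt >= 2.0 interface used above: the
# acquisition function goes into the constructor and suggest() takes no args.
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.acquisition import UpperConfidenceBound

optimizer = BayesianOptimization(
    f=None,                              # evaluated externally, as in tune()
    acquisition_function=UpperConfidenceBound(kappa=10.0, exploration_decay=0.9),
    pbounds={'x': (-2.0, 2.0)},          # illustrative 1-D search space
    allow_duplicate_points=True,
    random_state=np.random.RandomState(42),
)
for _ in range(10):
    params = optimizer.suggest()               # ask for a new point
    target = -(params['x'] - 1.0) ** 2         # toy objective, not from the package
    optimizer.register(params, target)         # tell the optimizer the result
print(optimizer.max)
```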
@@ -219,8 +208,8 @@

  # continue with next iteration
  print('\n' + '*' * 25 +
- '\n' + f'[{datetime.timedelta(seconds=elapsed)}] ' +
- f'starting iteration {it}' +
+ f'\n[{datetime.timedelta(seconds=elapsed)}] ' +
+ f'starting iteration {it + 1}' +
  '\n' + '*' * 25)
  key, *subkeys = jax.random.split(key, num=num_workers + 1)
  rows = [None] * num_workers
@@ -256,10 +245,9 @@
  optimizer.register(params, target)

  # update acquisition function and suggest a new point
- utility.update_params()
- suggested[index] = optimizer.suggest(utility)
- old_kappa = kappas[index]
- kappas[index] = utility.kappa
+ suggested[index] = optimizer.suggest()
+ old_acq_params = acq_params[index]
+ acq_params[index] = vars(optimizer.acquisition_function)

  # transform suggestion back to natural space
  rddl_params = {
@@ -272,8 +260,8 @@
  best_params, best_target = rddl_params, target

  # write progress to file in real time
- rows[index] = [pid, index, it, target, best_target, old_kappa] + \
- list(rddl_params.values())
+ info_i = [pid, index, it, target, best_target, old_acq_params]
+ rows[index] = info_i + list(rddl_params.values())

  # write results of all processes in current iteration to file
  with open(filename, 'a', newline='') as file:
@@ -308,16 +296,20 @@
  raise_warning(f'failed to import packages matplotlib or sklearn, '
  f'aborting plot of search space\n{e}', 'red')
  else:
- data = np.loadtxt(filename, delimiter=',', dtype=object)
- data, target = data[1:, 3:], data[1:, 2]
- data = data.astype(np.float64)
- target = target.astype(np.float64)
+ with open(filename, 'r') as file:
+ data_iter = csv.reader(file, delimiter=',')
+ data = [row for row in data_iter]
+ data = np.asarray(data, dtype=object)
+ hparam = data[1:, len(COLUMNS):].astype(np.float64)
+ target = data[1:, 3].astype(np.float64)
  target = (target - np.min(target)) / (np.max(target) - np.min(target))
  embedding = MDS(n_components=2, normalized_stress='auto')
- data1 = embedding.fit_transform(data)
- sc = plt.scatter(data1[:, 0], data1[:, 1], c=target, s=4.,
- cmap='seismic', edgecolor='gray',
- linewidth=0.01, alpha=0.4)
+ hparam_low = embedding.fit_transform(hparam)
+ sc = plt.scatter(hparam_low[:, 0], hparam_low[:, 1], c=target, s=5,
+ cmap='seismic', edgecolor='gray', linewidth=0)
+ ax = plt.gca()
+ for i in range(len(target)):
+ ax.annotate(str(i), (hparam_low[i, 0], hparam_low[i, 1]), fontsize=3)
  plt.colorbar(sc)
  plt.savefig(self._filename('gp_points', 'pdf'))
  plt.clf()
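The plotting hunk above now reads the tuning log back with `csv` and embeds the hyper-parameter columns in 2D with MDS. A standalone sketch of the same idea, assuming the CSV layout written by `tune()` (the six `COLUMNS` fields followed by the hyper-parameter columns, with the target in column 3); the file name `gp_slp.csv` is hypothetical:

```python
# Standalone version of the search-space plot: load the tuning CSV and embed
# the hyper-parameter columns in 2D with MDS, colored by the normalized target.
import csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import MDS

with open('gp_slp.csv', 'r') as file:            # hypothetical output of tune()
    data = np.asarray(list(csv.reader(file, delimiter=',')), dtype=object)
hparam = data[1:, 6:].astype(np.float64)         # columns after the COLUMNS prefix
target = data[1:, 3].astype(np.float64)          # the 'target' column
target = (target - target.min()) / (target.max() - target.min())

low = MDS(n_components=2, normalized_stress='auto').fit_transform(hparam)
sc = plt.scatter(low[:, 0], low[:, 1], c=target, s=5, cmap='seismic')
plt.colorbar(sc)
plt.savefig('gp_points.pdf')
```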
@@ -342,9 +334,11 @@ def objective_slp(params, kwargs, key, index):
  std, lr, w, wa = param_values
  else:
  std, lr, w = param_values
- wa = None
+ wa = None
+ key, subkey = jax.random.split(key)
  if kwargs['verbose']:
- print(f'[{index}] key={key}, std={std}, lr={lr}, w={w}, wa={wa}...', flush=True)
+ print(f'[{index}] key={subkey[0]}, '
+ f'std={std}, lr={lr}, w={w}, wa={wa}...', flush=True)

  # initialize planning algorithm
  planner = JaxBackpropPlanner(
@@ -358,7 +352,6 @@ def objective_slp(params, kwargs, key, index):
  model_params = {name: w for name in planner.compiled.model_params}

  # initialize policy
- key, subkey = jax.random.split(key)
  policy = JaxOfflineController(
  planner=planner,
  key=subkey,
@@ -384,7 +377,7 @@ def objective_slp(params, kwargs, key, index):
  key, subkey = jax.random.split(key)
  total_reward = policy.evaluate(env, seed=np.array(subkey)[0])['mean']
  if kwargs['verbose']:
- print(f' [{index}] trial {trial + 1} key={subkey}, '
+ print(f' [{index}] trial {trial + 1} key={subkey[0]}, '
  f'reward={total_reward}', flush=True)
  average_reward += total_reward / kwargs['eval_trials']
  if kwargs['verbose']:
@@ -474,8 +467,10 @@ def objective_replan(params, kwargs, key, index):
  else:
  std, lr, w, T = param_values
  wa = None
+ key, subkey = jax.random.split(key)
  if kwargs['verbose']:
- print(f'[{index}] key={key}, std={std}, lr={lr}, w={w}, wa={wa}, T={T}...', flush=True)
+ print(f'[{index}] key={subkey[0]}, '
+ f'std={std}, lr={lr}, w={w}, wa={wa}, T={T}...', flush=True)

  # initialize planning algorithm
  planner = JaxBackpropPlanner(
@@ -490,7 +485,6 @@ def objective_replan(params, kwargs, key, index):
  model_params = {name: w for name in planner.compiled.model_params}

  # initialize controller
- key, subkey = jax.random.split(key)
  policy = JaxOnlineController(
  planner=planner,
  key=subkey,
@@ -516,7 +510,7 @@
  key, subkey = jax.random.split(key)
  total_reward = policy.evaluate(env, seed=np.array(subkey)[0])['mean']
  if kwargs['verbose']:
- print(f' [{index}] trial {trial + 1} key={subkey}, '
+ print(f' [{index}] trial {trial + 1} key={subkey[0]}, '
  f'reward={total_reward}', flush=True)
  average_reward += total_reward / kwargs['eval_trials']
  if kwargs['verbose']:
@@ -602,9 +596,11 @@ def objective_drp(params, kwargs, key, index):
  ]

  # unpack hyper-parameters
- lr, w, layers, neurons = param_values
+ lr, w, layers, neurons = param_values
+ key, subkey = jax.random.split(key)
  if kwargs['verbose']:
- print(f'[{index}] key={key}, lr={lr}, w={w}, layers={layers}, neurons={neurons}...', flush=True)
+ print(f'[{index}] key={subkey[0]}, '
+ f'lr={lr}, w={w}, layers={layers}, neurons={neurons}...', flush=True)

  # initialize planning algorithm
  planner = JaxBackpropPlanner(
@@ -618,7 +614,6 @@
  model_params = {name: w for name in planner.compiled.model_params}

  # initialize policy
- key, subkey = jax.random.split(key)
  policy = JaxOfflineController(
  planner=planner,
  key=subkey,
@@ -644,7 +639,7 @@
  key, subkey = jax.random.split(key)
  total_reward = policy.evaluate(env, seed=np.array(subkey)[0])['mean']
  if kwargs['verbose']:
- print(f' [{index}] trial {trial + 1} key={subkey}, '
+ print(f' [{index}] trial {trial + 1} key={subkey[0]}, '
  f'reward={total_reward}', flush=True)
  average_reward += total_reward / kwargs['eval_trials']
  if kwargs['verbose']:
@@ -16,4 +16,5 @@ rollout_horizon=30
  [Training]
  key=42
  epochs=1000
- train_seconds=1
+ train_seconds=1
+ print_summary=False
@@ -16,4 +16,5 @@ rollout_horizon=5
  [Training]
  key=42
  epochs=2000
- train_seconds=1
+ train_seconds=1
+ print_summary=False
@@ -16,4 +16,5 @@ rollout_horizon=5
  [Training]
  key=42
  epochs=500
- train_seconds=1
+ train_seconds=1
+ print_summary=False
@@ -6,9 +6,9 @@ tnorm_kwargs={}

  [Optimizer]
  method='JaxStraightLinePlan'
- method_kwargs={'initializer': 'normal', 'initializer_kwargs': {'stddev': 0.001}}
+ method_kwargs={}
  optimizer='rmsprop'
- optimizer_kwargs={'learning_rate': 0.001}
+ optimizer_kwargs={'learning_rate': 0.1}
  batch_size_train=32
  batch_size_test=32
  rollout_horizon=5
@@ -17,4 +17,5 @@ rollout_horizon=5
  key=42
  epochs=1000
  train_seconds=1
- policy_hyperparams={'cut-out': 10.0, 'put-out': 10.0}
+ policy_hyperparams={'cut-out': 10.0, 'put-out': 10.0}
+ print_summary=False
@@ -17,4 +17,5 @@ rollout_horizon=5
  key=42
  epochs=2000
  train_seconds=1
- policy_hyperparams=2.0
+ policy_hyperparams=2.0
+ print_summary=False
@@ -59,9 +59,7 @@ def main(domain, instance, method, trials=5, iters=20, workers=4):
  gp_iters=iters)

  # perform tuning and report best parameters
- best = tuning.tune(key=train_args['key'], filename=f'gp_{method}',
- save_plot=True)
- print(f'best parameters found: {best}')
+ tuning.tune(key=train_args['key'], filename=f'gp_{method}', save_plot=True)


  if __name__ == "__main__":
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyRDDLGym-jax
- Version: 0.4
+ Version: 0.5
  Summary: pyRDDLGym-jax: automatic differentiation for solving sequential planning problems in JAX.
  Home-page: https://github.com/pyrddlgym-project/pyRDDLGym-jax
  Author: Michael Gimelfarb, Ayal Taitler, Scott Sanner
@@ -13,16 +13,18 @@ Classifier: Natural Language :: English
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
- Requires-Python: >=3.8
+ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: pyRDDLGym >=2.0
  Requires-Dist: tqdm >=4.66
- Requires-Dist: bayesian-optimization >=1.4.3
  Requires-Dist: jax >=0.4.12
  Requires-Dist: optax >=0.1.9
  Requires-Dist: dm-haiku >=0.0.10
  Requires-Dist: tensorflow-probability >=0.21.0
+ Provides-Extra: extra
+ Requires-Dist: bayesian-optimization >=2.0.0 ; extra == 'extra'
+ Requires-Dist: rddlrepository >=2.0 ; extra == 'extra'

  # pyRDDLGym-jax

@@ -57,17 +59,17 @@ To use the compiler or planner without the automated hyper-parameter tuning, you
  - ``tensorflow-probability>=0.21.0``

  Additionally, if you wish to run the examples, you need ``rddlrepository>=2``.
- To run the automated tuning optimization, you will also need ``bayesian-optimization>=1.4.3``.
+ To run the automated tuning optimization, you will also need ``bayesian-optimization>=2.0.0``.

- You can install this package, together with all of its requirements, via pip:
+ You can install pyRDDLGym-jax with all requirements using pip:

  ```shell
- pip install rddlrepository pyRDDLGym-jax
+ pip install pyRDDLGym-jax[extra]
  ```

  ## Running from the Command Line

- A basic run script is provided to run the Jax Planner on any domain in ``rddlrepository``, and can be launched in the command line from the install directory of pyRDDLGym-jax:
+ A basic run script is provided to run the Jax Planner on any domain in ``rddlrepository`` from the install directory of pyRDDLGym-jax:

  ```shell
  python -m pyRDDLGym_jax.examples.run_plan <domain> <instance> <method> <episodes>
@@ -91,8 +93,8 @@ python -m pyRDDLGym_jax.examples.run_tune <domain> <instance> <method> <trials>
  ```

  where:
- - ``domain`` is the domain identifier as specified in rddlrepository (i.e. Wildfire_MDP_ippc2014)
- - ``instance`` is the instance identifier (i.e. 1, 2, ... 10)
+ - ``domain`` is the domain identifier as specified in rddlrepository
+ - ``instance`` is the instance identifier
  - ``method`` is the planning method to use (i.e. drp, slp, replan)
  - ``trials`` is the (optional) number of trials/episodes to average in evaluating each hyper-parameter setting
  - ``iters`` is the (optional) maximum number of iterations/evaluations of Bayesian optimization to perform
@@ -1,18 +1,18 @@
  pyRDDLGym_jax/__init__.py,sha256=rexmxcBiCOcwctw4wGvk7UxS9MfZn_1CYXp53SoLKlU,19
  pyRDDLGym_jax/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyRDDLGym_jax/core/compiler.py,sha256=SnDN3-J84Wv_YVHoDmfM_U4Ob8uaFLGX4vEaeWC-ERY,90037
- pyRDDLGym_jax/core/logic.py,sha256=o1YAjMnXfi8gwb42kAigBmaf9uIYUWal9__FEkWohrk,26733
- pyRDDLGym_jax/core/planner.py,sha256=Hrwfn88bUu1LNZcnFC5psHPzcIUbPeF4Rn1pFO6_qH0,102655
+ pyRDDLGym_jax/core/logic.py,sha256=s00vbLZgBA_EVzREiMLR3Ru2zrtGJu0b3agtNYGnN7E,28937
+ pyRDDLGym_jax/core/planner.py,sha256=p_5BpOK3RHBZFUmKZ1P1VKaBl_Pz9Vx6OPB7P57nYyQ,106336
  pyRDDLGym_jax/core/simulator.py,sha256=hWv6pr-4V-SSCzBYgdIPmKdUDMalft-Zh6dzOo5O9-0,8331
- pyRDDLGym_jax/core/tuning.py,sha256=D_kD8wjqMroCdtjE9eksR2UqrqXJqazsAKrMEHwPxYM,29589
+ pyRDDLGym_jax/core/tuning.py,sha256=TeKcBT9IxSeb5MShPWL8KuL4G7OPlMBFvRTjyehHv4M,29442
  pyRDDLGym_jax/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyRDDLGym_jax/examples/run_gradient.py,sha256=KhXvijRDZ4V7N8NOI2WV8ePGpPna5_vnET61YwS7Tco,2919
  pyRDDLGym_jax/examples/run_gym.py,sha256=rXvNWkxe4jHllvbvU_EOMji_2-2k5d4tbBKhpMm_Gaw,1526
  pyRDDLGym_jax/examples/run_plan.py,sha256=OENf8s-SrMlh7CYXNhanQiau35b4atLBJMNjgP88DCg,2463
  pyRDDLGym_jax/examples/run_scipy.py,sha256=wvcpWCvdjvYHntO95a7JYfY2fuCMUTKnqjJikW0PnL4,2291
- pyRDDLGym_jax/examples/run_tune.py,sha256=-M4KoBpg5lshQ4mmU0cnLs2i7-ldSIr_OcxHK7YA6bw,3273
+ pyRDDLGym_jax/examples/run_tune.py,sha256=_Q6WWMHYnWReU8Q0dbrS6Evaw-1LsN8TFcJWe5QV7xo,3196
  pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_drp.cfg,sha256=pbkz6ccgk5dHXp7cfYbZNFyJobpGyxUZleCy4fvlmaU,336
- pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_replan.cfg,sha256=OswO9YD4Xh1pw3R3LkUBb67WLtj5XlE3qnMQ5CKwPsM,332
+ pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_replan.cfg,sha256=0_9-r1wJncP1C1hbWCBAZJxRnNFGboQn-H3kyyDlH_8,353
  pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_slp.cfg,sha256=FxZ4xcg2j2PzeH-wUseRR280juQN5bJjoyt6PtI1W7c,329
  pyRDDLGym_jax/examples/configs/HVAC_ippc2023_drp.cfg,sha256=FTGFwRAGyeRrbDMh_FV8iv8ZHrlj3Htju4pfPNmKIcw,336
  pyRDDLGym_jax/examples/configs/HVAC_ippc2023_slp.cfg,sha256=wjtz86_Gz0RfQu3bbrz56PTXL8JMernINx7AtJuZCPs,314
@@ -22,23 +22,23 @@ pyRDDLGym_jax/examples/configs/MountainCar_Continuous_gym_slp.cfg,sha256=e7j-1Z6
  pyRDDLGym_jax/examples/configs/MountainCar_ippc2023_slp.cfg,sha256=Z6CxaOxHv4oF6nW7SfSn_HshlQGDlNCPGASTnDTdL7Q,327
  pyRDDLGym_jax/examples/configs/Pendulum_gym_slp.cfg,sha256=Uy1mrX-AZMS-KBAhWXJ3c_QAhd4bRSWttDoFGYQ08lQ,315
  pyRDDLGym_jax/examples/configs/PowerGen_Continuous_drp.cfg,sha256=SM5_U4RwvvucHVAOdMG4vqH0Eg43f3WX9ZlV6aFPgTw,341
- pyRDDLGym_jax/examples/configs/PowerGen_Continuous_replan.cfg,sha256=lcqQ7P7X4qAbMlpkKKuYGn2luSZH-yFB7oi-eHj9Qng,332
+ pyRDDLGym_jax/examples/configs/PowerGen_Continuous_replan.cfg,sha256=uFduTjWwQxAVuKnNsmPLQB92MaBPmaHC59HNu8117FE,353
  pyRDDLGym_jax/examples/configs/PowerGen_Continuous_slp.cfg,sha256=kG1-02ScmwsEwX7QIAZTD7si90Mb06b79G5oqcMQ9Hg,316
  pyRDDLGym_jax/examples/configs/Quadcopter_drp.cfg,sha256=yGMBWiVZT8KdZ1PhQ4kIxPvnjht1ss0UheTV-Nt9oaA,364
  pyRDDLGym_jax/examples/configs/Quadcopter_slp.cfg,sha256=9QNl58PyoJYhmwvrhzUxlLEy8vGbmwE6lRuOdvhLjGQ,317
  pyRDDLGym_jax/examples/configs/Reservoir_Continuous_drp.cfg,sha256=rrubYvC1q7Ff0ADV0GXtLw-rD9E4m7qfR66qxdYNTD8,339
- pyRDDLGym_jax/examples/configs/Reservoir_Continuous_replan.cfg,sha256=DAb-J2KwvJXViRRSHZe8aJwZiPljC28HtrKJPieeUCY,331
+ pyRDDLGym_jax/examples/configs/Reservoir_Continuous_replan.cfg,sha256=dGs2ZC6xZwU7_Wz68u2bSPbGBRCRjIb8XDVuK-ejOZA,352
  pyRDDLGym_jax/examples/configs/Reservoir_Continuous_slp.cfg,sha256=QwKzCAFaErrTCHaJwDPLOxPHpNGNuAKMUoZjLLnMrNc,314
  pyRDDLGym_jax/examples/configs/UAV_Continuous_slp.cfg,sha256=QiJCJYOrdXXZfOTuPleGswREFxjGlqQSA0rw00YJWWI,318
  pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_drp.cfg,sha256=PGkgll7h5vhSF13JScKoQ-vpWaAGNJ_PUEhK7jEjNx4,340
- pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_replan.cfg,sha256=kEDAwsJQ_t9WPzPhIxfS0hRtgOhtFdJFfmPtTTJuwUE,454
+ pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_replan.cfg,sha256=01kb0ZyP7kd7Jy0sNuw0mXJUZQGfWuqdTkcGLz0172M,409
  pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_slp.cfg,sha256=w2wipsA8PE5OBkYVIKajjtCOtiHqmMeY3XQVPAApwFk,371
  pyRDDLGym_jax/examples/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyRDDLGym_jax/examples/configs/default_drp.cfg,sha256=S2-5hPZtgAwUAFpiCAgSi-cnGhYHSDzMGMmatwhbM78,344
- pyRDDLGym_jax/examples/configs/default_replan.cfg,sha256=VWWPhOYBRq4cWwtrChw5pPqRmlX_nHbMvwciHd9hoLc,357
+ pyRDDLGym_jax/examples/configs/default_replan.cfg,sha256=4ndUgyFELiFAWhsfnkDqg_LO3fhza24bkMxFgg7Xnmg,378
  pyRDDLGym_jax/examples/configs/default_slp.cfg,sha256=TG3mtHUnCA7J2Gm9SczENpqAymTnzCE9dj1Z_R-FnVk,340
- pyRDDLGym_jax-0.4.dist-info/LICENSE,sha256=Y0Gi6H6mLOKN-oIKGZulQkoTJyPZeAaeuZu7FXH-meg,1095
- pyRDDLGym_jax-0.4.dist-info/METADATA,sha256=-Kf8PLxf_7MiiYXzlZAf31kV1pT-Rurc7QY7dT3Fwk0,12857
- pyRDDLGym_jax-0.4.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- pyRDDLGym_jax-0.4.dist-info/top_level.txt,sha256=n_oWkP_BoZK0VofvPKKmBZ3NPk86WFNvLhi1BktCbVQ,14
- pyRDDLGym_jax-0.4.dist-info/RECORD,,
+ pyRDDLGym_jax-0.5.dist-info/LICENSE,sha256=Y0Gi6H6mLOKN-oIKGZulQkoTJyPZeAaeuZu7FXH-meg,1095
+ pyRDDLGym_jax-0.5.dist-info/METADATA,sha256=TIAuFrlsilIcT1lWHdshjECEuBDO4yElSMGB_gH2EDM,12842
+ pyRDDLGym_jax-0.5.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ pyRDDLGym_jax-0.5.dist-info/top_level.txt,sha256=n_oWkP_BoZK0VofvPKKmBZ3NPk86WFNvLhi1BktCbVQ,14
+ pyRDDLGym_jax-0.5.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.3.0)
+ Generator: setuptools (75.5.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
