pyRDDLGym-jax 2.5.tar.gz → 2.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/PKG-INFO +5 -13
  2. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/README.md +3 -11
  3. pyrddlgym_jax-2.6/pyRDDLGym_jax/__init__.py +1 -0
  4. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/compiler.py +16 -7
  5. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/logic.py +6 -8
  6. pyrddlgym_jax-2.6/pyRDDLGym_jax/core/model.py +595 -0
  7. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/planner.py +173 -21
  8. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/PKG-INFO +5 -13
  9. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/SOURCES.txt +1 -0
  10. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/requires.txt +1 -1
  11. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/setup.py +2 -2
  12. pyrddlgym_jax-2.5/pyRDDLGym_jax/__init__.py +0 -1
  13. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/LICENSE +0 -0
  14. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/__init__.py +0 -0
  15. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/assets/__init__.py +0 -0
  16. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/assets/favicon.ico +0 -0
  17. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/simulator.py +0 -0
  18. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/tuning.py +0 -0
  19. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/core/visualization.py +0 -0
  20. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/entry_point.py +0 -0
  21. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/__init__.py +0 -0
  22. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_drp.cfg +0 -0
  23. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_replan.cfg +0 -0
  24. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_slp.cfg +0 -0
  25. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/HVAC_ippc2023_drp.cfg +0 -0
  26. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/HVAC_ippc2023_slp.cfg +0 -0
  27. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/MountainCar_Continuous_gym_slp.cfg +0 -0
  28. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/MountainCar_ippc2023_slp.cfg +0 -0
  29. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/PowerGen_Continuous_drp.cfg +0 -0
  30. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/PowerGen_Continuous_replan.cfg +0 -0
  31. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/PowerGen_Continuous_slp.cfg +0 -0
  32. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Quadcopter_drp.cfg +0 -0
  33. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Quadcopter_slp.cfg +0 -0
  34. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Reservoir_Continuous_drp.cfg +0 -0
  35. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Reservoir_Continuous_replan.cfg +0 -0
  36. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Reservoir_Continuous_slp.cfg +0 -0
  37. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/UAV_Continuous_slp.cfg +0 -0
  38. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_drp.cfg +0 -0
  39. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_replan.cfg +0 -0
  40. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/Wildfire_MDP_ippc2014_slp.cfg +0 -0
  41. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/__init__.py +0 -0
  42. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/default_drp.cfg +0 -0
  43. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/default_replan.cfg +0 -0
  44. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/default_slp.cfg +0 -0
  45. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/tuning_drp.cfg +0 -0
  46. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/tuning_replan.cfg +0 -0
  47. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/configs/tuning_slp.cfg +0 -0
  48. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/run_gradient.py +0 -0
  49. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/run_gym.py +0 -0
  50. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/run_plan.py +0 -0
  51. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/run_scipy.py +0 -0
  52. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax/examples/run_tune.py +0 -0
  53. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/dependency_links.txt +0 -0
  54. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/entry_points.txt +0 -0
  55. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/pyRDDLGym_jax.egg-info/top_level.txt +0 -0
  56. {pyrddlgym_jax-2.5 → pyrddlgym_jax-2.6}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pyRDDLGym-jax
- Version: 2.5
+ Version: 2.6
  Summary: pyRDDLGym-jax: automatic differentiation for solving sequential planning problems in JAX.
  Home-page: https://github.com/pyrddlgym-project/pyRDDLGym-jax
  Author: Michael Gimelfarb, Ayal Taitler, Scott Sanner
@@ -20,7 +20,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: pyRDDLGym>=2.0
+ Requires-Dist: pyRDDLGym>=2.3
  Requires-Dist: tqdm>=4.66
  Requires-Dist: jax>=0.4.12
  Requires-Dist: optax>=0.1.9
@@ -55,7 +55,7 @@ Dynamic: summary
  
  [Installation](#installation) | [Run cmd](#running-from-the-command-line) | [Run python](#running-from-another-python-application) | [Configuration](#configuring-the-planner) | [Dashboard](#jaxplan-dashboard) | [Tuning](#tuning-the-planner) | [Simulation](#simulation) | [Citing](#citing-jaxplan)
  
- **pyRDDLGym-jax (known in the literature as JaxPlan) is an efficient gradient-based/differentiable planning algorithm in JAX.**
+ **pyRDDLGym-jax (or JaxPlan) is an efficient gradient-based planning algorithm based on JAX.**
  
  Purpose:
  
@@ -84,7 +84,7 @@ and was moved to the individual logic components which have their own unique wei
  
  > [!NOTE]
  > While JaxPlan can support some discrete state/action problems through model relaxations, on some discrete problems it can perform poorly (though there is an ongoing effort to remedy this!).
- > If you find it is not making sufficient progress, check out the [PROST planner](https://github.com/pyrddlgym-project/pyRDDLGym-prost) (for discrete spaces) or the [deep reinforcement learning wrappers](https://github.com/pyrddlgym-project/pyRDDLGym-rl).
+ > If you find it is not making progress, check out the [PROST planner](https://github.com/pyrddlgym-project/pyRDDLGym-prost) (for discrete spaces) or the [deep reinforcement learning wrappers](https://github.com/pyrddlgym-project/pyRDDLGym-rl).
  
  ## Installation
  
@@ -220,13 +220,7 @@ controller = JaxOfflineController(planner, **train_args)
  ## JaxPlan Dashboard
  
  Since version 1.0, JaxPlan has an optional dashboard that allows keeping track of the planner performance across multiple runs,
- and visualization of the policy or model, and other useful debugging features.
- 
- <p align="middle">
- <img src="https://github.com/pyrddlgym-project/pyRDDLGym-jax/blob/main/Images/dashboard.png" width="480" height="248" margin=0/>
- </p>
- 
- To run the dashboard, add the following entry to your config file:
+ and visualization of the policy or model, and other useful debugging features. To run the dashboard, add the following to your config file:
  
  ```ini
  ...
@@ -235,8 +229,6 @@ dashboard=True
  ...
  ```
  
- More documentation about this and other new features will be coming soon.
- 
  ## Tuning the Planner
  
  A basic run script is provided to run automatic Bayesian hyper-parameter tuning for the most sensitive parameters of JaxPlan:
@@ -8,7 +8,7 @@
  
  [Installation](#installation) | [Run cmd](#running-from-the-command-line) | [Run python](#running-from-another-python-application) | [Configuration](#configuring-the-planner) | [Dashboard](#jaxplan-dashboard) | [Tuning](#tuning-the-planner) | [Simulation](#simulation) | [Citing](#citing-jaxplan)
  
- **pyRDDLGym-jax (known in the literature as JaxPlan) is an efficient gradient-based/differentiable planning algorithm in JAX.**
+ **pyRDDLGym-jax (or JaxPlan) is an efficient gradient-based planning algorithm based on JAX.**
  
  Purpose:
  
@@ -37,7 +37,7 @@ and was moved to the individual logic components which have their own unique wei
  
  > [!NOTE]
  > While JaxPlan can support some discrete state/action problems through model relaxations, on some discrete problems it can perform poorly (though there is an ongoing effort to remedy this!).
- > If you find it is not making sufficient progress, check out the [PROST planner](https://github.com/pyrddlgym-project/pyRDDLGym-prost) (for discrete spaces) or the [deep reinforcement learning wrappers](https://github.com/pyrddlgym-project/pyRDDLGym-rl).
+ > If you find it is not making progress, check out the [PROST planner](https://github.com/pyrddlgym-project/pyRDDLGym-prost) (for discrete spaces) or the [deep reinforcement learning wrappers](https://github.com/pyrddlgym-project/pyRDDLGym-rl).
  
  ## Installation
  
@@ -173,13 +173,7 @@ controller = JaxOfflineController(planner, **train_args)
  ## JaxPlan Dashboard
  
  Since version 1.0, JaxPlan has an optional dashboard that allows keeping track of the planner performance across multiple runs,
- and visualization of the policy or model, and other useful debugging features.
- 
- <p align="middle">
- <img src="https://github.com/pyrddlgym-project/pyRDDLGym-jax/blob/main/Images/dashboard.png" width="480" height="248" margin=0/>
- </p>
- 
- To run the dashboard, add the following entry to your config file:
+ and visualization of the policy or model, and other useful debugging features. To run the dashboard, add the following to your config file:
  
  ```ini
  ...
@@ -188,8 +182,6 @@ dashboard=True
  ...
  ```
  
- More documentation about this and other new features will be coming soon.
- 
  ## Tuning the Planner
  
  A basic run script is provided to run automatic Bayesian hyper-parameter tuning for the most sensitive parameters of JaxPlan:
@@ -0,0 +1 @@
+ __version__ = '2.6'
@@ -237,7 +237,8 @@ class JaxRDDLCompiler:
  
      def compile_transition(self, check_constraints: bool=False,
                             constraint_func: bool=False,
-                            init_params_constr: Dict[str, Any]={}) -> Callable:
+                            init_params_constr: Dict[str, Any]={},
+                            cache_path_info: bool=False) -> Callable:
          '''Compiles the current RDDL model into a JAX transition function that
          samples the next state.
          
@@ -274,6 +275,7 @@ class JaxRDDLCompiler:
              returned log and does not raise an exception
          :param constraint_func: produces the h(s, a) function described above
              in addition to the usual outputs
+         :param cache_path_info: whether to save full path traces as part of the log
          '''
          NORMAL = JaxRDDLCompiler.ERROR_CODES['NORMAL']
          rddl = self.rddl
@@ -322,8 +324,11 @@
              errors |= err
              
              # calculate fluent values
-             fluents = {name: values for (name, values) in subs.items()
-                        if name not in rddl.non_fluents}
+             if cache_path_info:
+                 fluents = {name: values for (name, values) in subs.items()
+                            if name not in rddl.non_fluents}
+             else:
+                 fluents = {}
              
              # set the next state to the current state
              for (state, next_state) in rddl.next_state.items():
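
To put the new flag in context, here is a minimal sketch of opting back in to full path traces. The domain/instance names and the surrounding setup follow the usual pyRDDLGym pattern and are assumptions for illustration, not taken from this diff; only the `cache_path_info` argument itself comes from the change above.

```python
# Sketch only: requesting full fluent traces in the step log via the
# cache_path_info flag added in 2.6. Domain and instance names are assumed.
import pyRDDLGym
from pyRDDLGym_jax.core.compiler import JaxRDDLCompiler

env = pyRDDLGym.make('Cartpole_Continuous_gym', '0', vectorized=True)
compiler = JaxRDDLCompiler(rddl=env.model)
compiler.compile()

# the default is now cache_path_info=False, which keeps the returned log small;
# pass True to recover the pre-2.6 behaviour of logging every fluent value
step_fn = compiler.compile_transition(cache_path_info=True)
```
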
@@ -367,7 +372,9 @@
                        n_batch: int,
                        check_constraints: bool=False,
                        constraint_func: bool=False,
-                       init_params_constr: Dict[str, Any]={}) -> Callable:
+                       init_params_constr: Dict[str, Any]={},
+                       model_params_reduction: Callable=lambda x: x[0],
+                       cache_path_info: bool=False) -> Callable:
          '''Compiles the current RDDL model into a JAX transition function that
          samples trajectories with a fixed horizon from a policy.
          
@@ -399,10 +406,13 @@
              returned log and does not raise an exception
          :param constraint_func: produces the h(s, a) constraint function
              in addition to the usual outputs
+         :param model_params_reduction: how to aggregate updated model_params across runs
+             in the batch (defaults to selecting the first element's parameters in the batch)
+         :param cache_path_info: whether to save full path traces as part of the log
          '''
          rddl = self.rddl
          jax_step_fn = self.compile_transition(
-             check_constraints, constraint_func, init_params_constr)
+             check_constraints, constraint_func, init_params_constr, cache_path_info)
          
          # for POMDP only observ-fluents are assumed visible to the policy
          if rddl.observ_fluents:
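
As a hedged illustration of the `model_params_reduction` parameter documented above (continuing the sketch from the previous hunk), a caller could reinstate the pre-2.6 behaviour of averaging model parameters over the batch instead of keeping the first element's. The positional arguments before `n_batch` and the `n_steps` keyword are assumptions; only `n_batch`, `model_params_reduction`, and its default appear in this diff.

```python
# Sketch: restoring the old reduction (mean over the batch axis) in place of
# the new default, which keeps the first batch element's parameters.
from functools import partial
import jax.numpy as jnp

rollout_fn = compiler.compile_rollouts(
    policy,                      # hypothetical policy callable (assumption)
    n_steps=env.horizon,         # assumed keyword; not shown in this diff
    n_batch=32,
    model_params_reduction=partial(jnp.mean, axis=0))
```
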
@@ -421,7 +431,6 @@
              return jax_step_fn(subkey, actions, subs, model_params)
          
          # do a batched step update from the policy
-         # TODO: come up with a better way to reduce the model_param batch dim
          def _jax_wrapped_batched_step_policy(carry, step):
              key, policy_params, hyperparams, subs, model_params = carry
              key, *subkeys = random.split(key, num=1 + n_batch)
@@ -430,7 +439,7 @@
                  _jax_wrapped_single_step_policy,
                  in_axes=(0, None, None, None, 0, None)
              )(keys, policy_params, hyperparams, step, subs, model_params)
-             model_params = jax.tree_util.tree_map(partial(jnp.mean, axis=0), model_params)
+             model_params = jax.tree_util.tree_map(model_params_reduction, model_params)
              carry = (key, policy_params, hyperparams, subs, model_params)
              return carry, log
  
@@ -1056,15 +1056,13 @@ class ExactLogic(Logic):
      def control_if(self, id, init_params):
          return self._jax_wrapped_calc_if_then_else_exact
      
-     @staticmethod
-     def _jax_wrapped_calc_switch_exact(pred, cases, params):
-         pred = pred[jnp.newaxis, ...]
-         sample = jnp.take_along_axis(cases, pred, axis=0)
-         assert sample.shape[0] == 1
-         return sample[0, ...], params
-     
      def control_switch(self, id, init_params):
-         return self._jax_wrapped_calc_switch_exact
+         def _jax_wrapped_calc_switch_exact(pred, cases, params):
+             pred = jnp.asarray(pred[jnp.newaxis, ...], dtype=self.INT)
+             sample = jnp.take_along_axis(cases, pred, axis=0)
+             assert sample.shape[0] == 1
+             return sample[0, ...], params
+         return _jax_wrapped_calc_switch_exact
  
  # ===========================================================================
  # random variables
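
For readers unfamiliar with the indexing trick above, here is a small self-contained JAX illustration of what the exact switch computes: `cases` stacks one candidate array per case along a new leading axis, `pred` holds the selected case index per element, and `take_along_axis` picks the matching entries. This is illustration only, not package code.

```python
# Standalone illustration of the take_along_axis-based switch used above.
import jax.numpy as jnp

cases = jnp.stack([jnp.zeros((2, 3)),        # case 0
                   jnp.ones((2, 3)),         # case 1
                   jnp.full((2, 3), 2.0)])   # case 2
pred = jnp.array([[0, 2, 1],
                  [1, 0, 2]])                # per-element case index

# mirror the 2.6 change: make the index explicitly integer before indexing
idx = pred[jnp.newaxis, ...].astype(jnp.int32)
sample = jnp.take_along_axis(cases, idx, axis=0)[0]
print(sample)
# [[0. 2. 1.]
#  [1. 0. 2.]]
```
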