pyRDDLGym-jax 1.1-py3-none-any.whl → 1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyRDDLGym_jax/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = '1.1'
+ __version__ = '1.3'
pyRDDLGym_jax/core/planner.py CHANGED
@@ -655,7 +655,10 @@ class JaxStraightLinePlan(JaxPlan):
  if ranges[var] == 'bool':
  param_flat = jnp.ravel(param)
  if noop[var]:
- param_flat = (-param_flat) if wrap_sigmoid else 1.0 - param_flat
+ if wrap_sigmoid:
+ param_flat = -param_flat
+ else:
+ param_flat = 1.0 - param_flat
  scores.append(param_flat)
  scores = jnp.concatenate(scores)
  descending = jnp.sort(scores)[::-1]
@@ -666,7 +669,10 @@ class JaxStraightLinePlan(JaxPlan):
  new_params = {}
  for (var, param) in params.items():
  if ranges[var] == 'bool':
- new_param = param + (surplus if noop[var] else -surplus)
+ if noop[var]:
+ new_param = param + surplus
+ else:
+ new_param = param - surplus
  new_param = _jax_project_bool_to_box(var, new_param, hyperparams)
  else:
  new_param = param
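The two hunks above keep the existing no-op convention: when a boolean action's default (no-op) value is True, the projection pushes the relaxed parameter up toward 1 rather than down toward 0, since "activity" for such an action means being False. A tiny standalone sketch of that direction flip (not package code; `adjust` is an illustrative helper):

```python
# Tiny illustration (not pyRDDLGym-jax source) of why the adjustment direction flips:
# when an action's no-op default is True, "active" means False, so removing surplus
# activity means pushing the relaxed parameter up toward 1 instead of down toward 0.
import jax.numpy as jnp

def adjust(param, surplus, noop):
    new_param = param + surplus if noop else param - surplus
    return jnp.clip(new_param, 0.0, 1.0)

param = jnp.array([0.2, 0.9, 0.7])
print(adjust(param, 0.3, noop=False))  # [0.  0.6 0.4] -> less activity
print(adjust(param, 0.3, noop=True))   # [0.5 1.  1. ] -> closer to the True default
```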
@@ -687,57 +693,73 @@ class JaxStraightLinePlan(JaxPlan):
  elif use_constraint_satisfaction and not self._use_new_projection:
 
  # calculate the surplus of actions above max-nondef-actions
- def _jax_wrapped_sogbofa_surplus(params, hyperparams):
- sum_action, count = 0.0, 0
- for (var, param) in params.items():
+ def _jax_wrapped_sogbofa_surplus(actions):
+ sum_action, k = 0.0, 0
+ for (var, action) in actions.items():
  if ranges[var] == 'bool':
- action = _jax_bool_param_to_action(var, param, hyperparams)
  if noop[var]:
- sum_action += jnp.size(action) - jnp.sum(action)
- count += jnp.sum(action < 1)
- else:
- sum_action += jnp.sum(action)
- count += jnp.sum(action > 0)
+ action = 1 - action
+ sum_action += jnp.sum(action)
+ k += jnp.count_nonzero(action)
  surplus = jnp.maximum(sum_action - allowed_actions, 0.0)
- count = jnp.maximum(count, 1)
- return surplus / count
+ return surplus, k
 
  # return whether the surplus is positive or reached compute limit
  max_constraint_iter = self._max_constraint_iter
 
  def _jax_wrapped_sogbofa_continue(values):
- it, _, _, surplus = values
- return jnp.logical_and(it < max_constraint_iter, surplus > 0)
+ it, _, surplus, k = values
+ return jnp.logical_and(
+ it < max_constraint_iter, jnp.logical_and(surplus > 0, k > 0))
 
  # reduce all bool action values by the surplus clipping at minimum
  # for no-op = True, do the opposite, i.e. increase all
  # bool action values by surplus clipping at maximum
  def _jax_wrapped_sogbofa_subtract_surplus(values):
- it, params, hyperparams, surplus = values
- new_params = {}
- for (var, param) in params.items():
+ it, actions, surplus, k = values
+ amount = surplus / k
+ new_actions = {}
+ for (var, action) in actions.items():
  if ranges[var] == 'bool':
- action = _jax_bool_param_to_action(var, param, hyperparams)
- new_action = action + (surplus if noop[var] else -surplus)
- new_action = jnp.clip(new_action, min_action, max_action)
- new_param = _jax_bool_action_to_param(var, new_action, hyperparams)
+ if noop[var]:
+ new_actions[var] = jnp.minimum(action + amount, 1)
+ else:
+ new_actions[var] = jnp.maximum(action - amount, 0)
  else:
- new_param = param
- new_params[var] = new_param
- new_surplus = _jax_wrapped_sogbofa_surplus(new_params, hyperparams)
+ new_actions[var] = action
+ new_surplus, new_k = _jax_wrapped_sogbofa_surplus(new_actions)
  new_it = it + 1
- return new_it, new_params, hyperparams, new_surplus
+ return new_it, new_actions, new_surplus, new_k
 
  # apply the surplus to the actions until it becomes zero
  def _jax_wrapped_sogbofa_project(params, hyperparams):
- surplus = _jax_wrapped_sogbofa_surplus(params, hyperparams)
- _, params, _, surplus = jax.lax.while_loop(
+
+ # convert parameters to actions
+ actions = {}
+ for (var, param) in params.items():
+ if ranges[var] == 'bool':
+ actions[var] = _jax_bool_param_to_action(var, param, hyperparams)
+ else:
+ actions[var] = param
+
+ # run SOGBOFA loop on the actions to get adjusted actions
+ surplus, k = _jax_wrapped_sogbofa_surplus(actions)
+ _, actions, surplus, k = jax.lax.while_loop(
  cond_fun=_jax_wrapped_sogbofa_continue,
  body_fun=_jax_wrapped_sogbofa_subtract_surplus,
- init_val=(0, params, hyperparams, surplus)
+ init_val=(0, actions, surplus, k)
  )
  converged = jnp.logical_not(surplus > 0)
- return params, converged
+
+ # convert the adjusted actions back to parameters
+ new_params = {}
+ for (var, action) in actions.items():
+ if ranges[var] == 'bool':
+ action = jnp.clip(action, min_action, max_action)
+ new_params[var] = _jax_bool_action_to_param(var, action, hyperparams)
+ else:
+ new_params[var] = action
+ return new_params, converged
 
  # clip actions to valid bounds and satisfy constraint on max actions
  def _jax_wrapped_slp_project_to_max_constraint(params, hyperparams):
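The rewritten `_jax_wrapped_sogbofa_project` now converts parameters to actions once, runs the surplus-reduction loop on the actions, and converts back to parameters at the end. The sketch below is a standalone illustration of the core loop, not the package source (`project_to_budget`, `budget`, and `max_iter` are illustrative names): spread the surplus over the actions that can still absorb it, inside `jax.lax.while_loop`, until the action budget is met or nothing is left to reduce.

```python
# Minimal standalone sketch (assumed names, not pyRDDLGym-jax API) of projecting
# relaxed boolean actions so their total does not exceed a budget.
import jax
import jax.numpy as jnp

def project_to_budget(actions: jnp.ndarray, budget: float, max_iter: int = 100):
    def surplus_and_count(a):
        surplus = jnp.maximum(jnp.sum(a) - budget, 0.0)
        k = jnp.count_nonzero(a)          # actions that can still absorb a reduction
        return surplus, k

    def cond(val):
        it, _, surplus, k = val
        return jnp.logical_and(it < max_iter,
                               jnp.logical_and(surplus > 0, k > 0))

    def body(val):
        it, a, surplus, k = val
        a = jnp.maximum(a - surplus / k, 0.0)  # subtract each action's share, clip at 0
        new_surplus, new_k = surplus_and_count(a)
        return it + 1, a, new_surplus, new_k

    surplus, k = surplus_and_count(actions)
    _, actions, surplus, _ = jax.lax.while_loop(cond, body, (0, actions, surplus, k))
    return actions, surplus <= 0

# three relaxed boolean actions but a budget of at most one unit of activity
acts, converged = project_to_budget(jnp.array([0.9, 0.8, 0.4]), budget=1.0)
print(acts, converged)
```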
@@ -1415,6 +1437,7 @@ r"""
 
  # optimization
  self.update = self._jax_update(train_loss)
+ self.check_zero_grad = self._jax_check_zero_gradients()
 
  def _jax_return(self, use_symlog):
  gamma = self.rddl.discount
@@ -1497,6 +1520,18 @@ r"""
 
  return jax.jit(_jax_wrapped_plan_update)
 
+ def _jax_check_zero_gradients(self):
+
+ def _jax_wrapped_zero_gradient(grad):
+ return jnp.allclose(grad, 0)
+
+ def _jax_wrapped_zero_gradients(grad):
+ leaves, _ = jax.tree_util.tree_flatten(
+ jax.tree_map(_jax_wrapped_zero_gradient, grad))
+ return jnp.all(jnp.asarray(leaves))
+
+ return jax.jit(_jax_wrapped_zero_gradients)
+
  def _batched_init_subs(self, subs):
  rddl = self.rddl
  n_train, n_test = self.batch_size_train, self.batch_size_test
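The new `_jax_check_zero_gradients` replaces the per-iteration NumPy check with a single jitted test over the gradient pytree. A standalone illustration of the same pattern (not the package source):

```python
# Standalone illustration of a jitted "all gradients are zero" test: flatten the
# gradient pytree, test each leaf with allclose, and reduce with jnp.all so the
# whole check can stay on device inside jit.
import jax
import jax.numpy as jnp

@jax.jit
def all_gradients_zero(grad) -> jnp.ndarray:
    leaves = jax.tree_util.tree_leaves(
        jax.tree_util.tree_map(lambda g: jnp.allclose(g, 0), grad))
    return jnp.all(jnp.asarray(leaves))

print(all_gradients_zero({'w': jnp.zeros((2, 3)), 'b': jnp.zeros(3)}))   # True
print(all_gradients_zero({'w': jnp.ones((2, 3)), 'b': jnp.zeros(3)}))    # False
```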
@@ -1795,7 +1830,6 @@ r"""
  rolling_test_loss = RollingMean(test_rolling_window)
  log = {}
  status = JaxPlannerStatus.NORMAL
- is_all_zero_fn = lambda x: np.allclose(x, 0)
 
  # initialize stopping criterion
  if stopping_rule is not None:
@@ -1836,9 +1870,7 @@ r"""
  # ==================================================================
 
  # no progress
- grad_norm_zero, _ = jax.tree_util.tree_flatten(
- jax.tree_map(is_all_zero_fn, train_log['grad']))
- if np.all(grad_norm_zero):
+ if self.check_zero_grad(train_log['grad']):
  status = JaxPlannerStatus.NO_PROGRESS
 
  # constraint satisfaction problem
@@ -2035,8 +2067,8 @@ r"""
  # must be numeric array
  # exception is for POMDPs at 1st epoch when observ-fluents are None
  dtype = np.atleast_1d(values).dtype
- if not jnp.issubdtype(dtype, jnp.number) \
- and not jnp.issubdtype(dtype, jnp.bool_):
+ if not np.issubdtype(dtype, np.number) \
+ and not np.issubdtype(dtype, np.bool_):
  if step == 0 and var in self.rddl.observ_fluents:
  subs[var] = self.test_compiled.init_values[var]
  else:
@@ -2077,10 +2109,11 @@ def mean_variance_utility(returns: jnp.ndarray, beta: float) -> float:
 
  @jax.jit
  def cvar_utility(returns: jnp.ndarray, alpha: float) -> float:
- alpha_mask = jax.lax.stop_gradient(
- returns <= jnp.percentile(returns, q=100 * alpha))
- return jnp.sum(returns * alpha_mask) / jnp.sum(alpha_mask)
-
+ var = jnp.percentile(returns, q=100 * alpha)
+ mask = returns <= var
+ weights = mask / jnp.maximum(1, jnp.sum(mask))
+ return jnp.sum(returns * weights)
+
 
  # ***********************************************************************
  # ALL VERSIONS OF CONTROLLERS
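The reworked `cvar_utility` drops the `stop_gradient` mask and instead averages the returns at or below the alpha-quantile (the VaR threshold), guarding the division when the tail is empty. A small numeric check of that formula (illustrative only, not the package source):

```python
# Worked numeric check of the rewritten CVaR utility: average the returns that
# fall at or below the alpha-quantile, with a floor of 1 on the tail count.
import jax
import jax.numpy as jnp

@jax.jit
def cvar(returns: jnp.ndarray, alpha: float) -> jnp.ndarray:
    var = jnp.percentile(returns, q=100 * alpha)     # value-at-risk threshold
    mask = returns <= var
    weights = mask / jnp.maximum(1, jnp.sum(mask))   # guards against an empty tail
    return jnp.sum(returns * weights)

returns = jnp.array([-10.0, -2.0, 0.0, 5.0, 7.0])
# alpha = 0.4 keeps the two worst returns, so CVaR = (-10 - 2) / 2 = -6
print(cvar(returns, 0.4))
```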
pyRDDLGym_jax/entry_point.py ADDED
@@ -0,0 +1,27 @@
+ import argparse
+
+ from pyRDDLGym_jax.examples import run_plan, run_tune
+
+ def main():
+ parser = argparse.ArgumentParser(description="Command line parser for the JaxPlan planner.")
+ subparsers = parser.add_subparsers(dest="jaxplan", required=True)
+
+ # planning
+ parser_plan = subparsers.add_parser("plan", help="Executes JaxPlan on a specified RDDL problem and method (slp, drp, or replan).")
+ parser_plan.add_argument('args', nargs=argparse.REMAINDER)
+
+ # tuning
+ parser_tune = subparsers.add_parser("tune", help="Tunes JaxPlan on a specified RDDL problem and method (slp, drp, or replan).")
+ parser_tune.add_argument('args', nargs=argparse.REMAINDER)
+
+ # dispatch
+ args = parser.parse_args()
+ if args.jaxplan == "plan":
+ run_plan.run_from_args(args.args)
+ elif args.jaxplan == "tune":
+ run_tune.run_from_args(args.args)
+ else:
+ parser.print_help()
+
+ if __name__ == "__main__":
+ main()
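Together with the `entry_points.txt` stanza added at the end of this diff, this module registers a `jaxplan` console script whose subcommands simply forward the remaining arguments to the example runners. An illustrative equivalence (the domain, instance, and method values are just the examples used in the README):

```python
# Equivalent ways to invoke the planner after this release:
#
#   shell:  jaxplan plan Quadcopter 1 slp 1
#
# is the same as calling the example module directly:
from pyRDDLGym_jax.examples import run_plan

run_plan.run_from_args(['Quadcopter', '1', 'slp', '1'])
```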
pyRDDLGym_jax/examples/run_plan.py CHANGED
@@ -12,7 +12,7 @@ The syntax for running this example is:
  where:
  <domain> is the name of a domain located in the /Examples directory
  <instance> is the instance number
- <method> is either slp, drp, or replan
+ <method> is slp, drp, replan, or a path to a valid .cfg file
  <episodes> is the optional number of evaluation rollouts
  '''
  import os
@@ -32,12 +32,19 @@ def main(domain, instance, method, episodes=1):
  env = pyRDDLGym.make(domain, instance, vectorized=True)
 
  # load the config file with planner settings
- abs_path = os.path.dirname(os.path.abspath(__file__))
- config_path = os.path.join(abs_path, 'configs', f'{domain}_{method}.cfg')
- if not os.path.isfile(config_path):
- raise_warning(f'Config file {config_path} was not found, '
- f'using default_{method}.cfg.', 'red')
- config_path = os.path.join(abs_path, 'configs', f'default_{method}.cfg')
+ if method in ['drp', 'slp', 'replan']:
+ abs_path = os.path.dirname(os.path.abspath(__file__))
+ config_path = os.path.join(abs_path, 'configs', f'{domain}_{method}.cfg')
+ if not os.path.isfile(config_path):
+ raise_warning(f'Config file {config_path} was not found, '
+ f'using default_{method}.cfg.', 'red')
+ config_path = os.path.join(abs_path, 'configs', f'default_{method}.cfg')
+ elif os.path.isfile(method):
+ config_path = method
+ else:
+ print('method must be slp, drp, replan, or a path to a valid .cfg file.')
+ exit(1)
+
  planner_args, _, train_args = load_config(config_path)
  if 'dashboard' in train_args:
  train_args['dashboard'].launch()
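With this change, `method` may be a known method name (resolved to a bundled domain or default config) or a path to an existing `.cfg` file. Illustrative calls of `run_plan.main` under both forms (the custom config path is hypothetical):

```python
# Illustrative only: the two forms `method` may now take, per the branch above.
from pyRDDLGym_jax.examples import run_plan

run_plan.main('Quadcopter', '1', 'slp')             # bundled domain or default config
run_plan.main('Quadcopter', '1', 'my_custom.cfg')   # hypothetical path to a user config
```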
@@ -54,16 +61,16 @@ def main(domain, instance, method, episodes=1):
  controller.evaluate(env, episodes=episodes, verbose=True, render=True)
  env.close()
 
-
- if __name__ == "__main__":
- args = sys.argv[1:]
+
+ def run_from_args(args):
  if len(args) < 3:
  print('python run_plan.py <domain> <instance> <method> [<episodes>]')
  exit(1)
- if args[2] not in ['drp', 'slp', 'replan']:
- print('<method> in [drp, slp, replan]')
- exit(1)
  kwargs = {'domain': args[0], 'instance': args[1], 'method': args[2]}
  if len(args) >= 4: kwargs['episodes'] = int(args[3])
  main(**kwargs)
+
+
+ if __name__ == "__main__":
+ run_from_args(sys.argv[1:])
 
pyRDDLGym_jax/examples/run_tune.py CHANGED
@@ -75,8 +75,7 @@ def main(domain, instance, method, trials=5, iters=20, workers=4):
  env.close()
 
 
- if __name__ == "__main__":
- args = sys.argv[1:]
+ def run_from_args(args):
  if len(args) < 3:
  print('python run_tune.py <domain> <instance> <method> [<trials>] [<iters>] [<workers>]')
  exit(1)
@@ -88,4 +87,7 @@ if __name__ == "__main__":
  if len(args) >= 5: kwargs['iters'] = int(args[4])
  if len(args) >= 6: kwargs['workers'] = int(args[5])
  main(**kwargs)
-
+
+
+ if __name__ == "__main__":
+ run_from_args(sys.argv[1:])
pyRDDLGym_jax-1.3.dist-info/METADATA CHANGED
@@ -1,17 +1,21 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: pyRDDLGym-jax
- Version: 1.1
+ Version: 1.3
  Summary: pyRDDLGym-jax: automatic differentiation for solving sequential planning problems in JAX.
  Home-page: https://github.com/pyrddlgym-project/pyRDDLGym-jax
  Author: Michael Gimelfarb, Ayal Taitler, Scott Sanner
  Author-email: mike.gimelfarb@mail.utoronto.ca, ataitler@gmail.com, ssanner@mie.utoronto.ca
  License: MIT License
- Classifier: Development Status :: 3 - Alpha
+ Classifier: Development Status :: 5 - Production/Stable
  Classifier: Intended Audience :: Science/Research
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Natural Language :: English
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
@@ -28,6 +32,17 @@ Requires-Dist: rddlrepository>=2.0; extra == "extra"
  Provides-Extra: dashboard
  Requires-Dist: dash>=2.18.0; extra == "dashboard"
  Requires-Dist: dash-bootstrap-components>=1.6.0; extra == "dashboard"
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
 
  # pyRDDLGym-jax
 
@@ -95,27 +110,28 @@ pip install pyRDDLGym-jax[extra,dashboard]
 
  ## Running from the Command Line
 
- A basic run script is provided to run JaxPlan on any domain in ``rddlrepository`` from the install directory of pyRDDLGym-jax:
+ A basic run script is provided to train JaxPlan on any RDDL problem:
 
  ```shell
- python -m pyRDDLGym_jax.examples.run_plan <domain> <instance> <method> <episodes>
+ jaxplan plan <domain> <instance> <method> <episodes>
  ```
 
  where:
  - ``domain`` is the domain identifier as specified in rddlrepository (i.e. Wildfire_MDP_ippc2014), or a path pointing to a valid ``domain.rddl`` file
  - ``instance`` is the instance identifier (i.e. 1, 2, ... 10), or a path pointing to a valid ``instance.rddl`` file
- - ``method`` is the planning method to use (i.e. drp, slp, replan)
+ - ``method`` is the planning method to use (i.e. drp, slp, replan) or a path to a valid .cfg file (see section below)
  - ``episodes`` is the (optional) number of episodes to evaluate the learned policy.
 
- The ``method`` parameter supports three possible modes:
+ The ``method`` parameter supports four possible modes:
  - ``slp`` is the basic straight line planner described [in this paper](https://proceedings.neurips.cc/paper_files/paper/2017/file/98b17f068d5d9b7668e19fb8ae470841-Paper.pdf)
  - ``drp`` is the deep reactive policy network described [in this paper](https://ojs.aaai.org/index.php/AAAI/article/view/4744)
- - ``replan`` is the same as ``slp`` except the plan is recalculated at every decision time step.
+ - ``replan`` is the same as ``slp`` except the plan is recalculated at every decision time step
+ - any other argument is interpreted as a file path to a valid configuration file.
 
- For example, the following will train JaxPlan on the Quadcopter domain with 4 drones:
+ For example, the following will train JaxPlan on the Quadcopter domain with 4 drones (with default config):
 
  ```shell
- python -m pyRDDLGym_jax.examples.run_plan Quadcopter 1 slp
+ jaxplan plan Quadcopter 1 slp
  ```
 
  ## Running from Another Python Application
@@ -197,7 +213,7 @@ controller = JaxOfflineController(planner, **train_args)
  ...
  ```
 
- ### JaxPlan Dashboard
+ ## JaxPlan Dashboard
 
  Since version 1.0, JaxPlan has an optional dashboard that allows keeping track of the planner performance across multiple runs,
  and visualization of the policy or model, and other useful debugging features.
@@ -217,7 +233,7 @@ dashboard=True
 
  More documentation about this and other new features will be coming soon.
 
- ### Tuning the Planner
+ ## Tuning the Planner
 
  It is easy to tune the planner's hyper-parameters efficiently and automatically using Bayesian optimization.
  To do this, first create a config file template with patterns replacing concrete parameter values that you want to tune, e.g.:
@@ -280,7 +296,7 @@ tuning.tune(key=42, log_file='path/to/log.csv')
  A basic run script is provided to run the automatic hyper-parameter tuning for the most sensitive parameters of JaxPlan:
 
  ```shell
- python -m pyRDDLGym_jax.examples.run_tune <domain> <instance> <method> <trials> <iters> <workers>
+ jaxplan tune <domain> <instance> <method> <trials> <iters> <workers>
  ```
 
  where:
pyRDDLGym_jax-1.3.dist-info/RECORD CHANGED
@@ -1,17 +1,20 @@
- pyRDDLGym_jax/__init__.py,sha256=tw0LYdHs-n56hTnwNcQ9D8gMlYPqN92Vz1GiVo9wBL0,19
+ pyRDDLGym_jax/__init__.py,sha256=p_veRZMP15-djJyMuDHT7Ul1RbCCHpYsZ9LO0GD1URo,19
+ pyRDDLGym_jax/entry_point.py,sha256=dxDlO_5gneEEViwkLCg30Z-KVzUgdRXaKuFjoZklkA0,974
  pyRDDLGym_jax/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyRDDLGym_jax/core/compiler.py,sha256=qy1TSivdpuZxWecDl5HEM0PXX45JB7DHzV7uAB8kmbE,88696
  pyRDDLGym_jax/core/logic.py,sha256=iYvLgWyQd_mrkwwoeRWao9NzjmhsObQnPq4DphILw1Q,38425
- pyRDDLGym_jax/core/planner.py,sha256=oKs9js7xyIc9-bxQFZSQNBw9s1nWQlz4DjENwEgSojY,100672
+ pyRDDLGym_jax/core/planner.py,sha256=TFFy91aCzRW600k_eP-7i2Gvp9wpNVjXlXtBnt9x03M,101744
  pyRDDLGym_jax/core/simulator.py,sha256=JpmwfPqYPBfEhmQ04ufBeclZOQ-U1ZiyAtLf1AIwO2M,8462
  pyRDDLGym_jax/core/tuning.py,sha256=LBhoVQZWWhYQj89gpM2B4xVHlYlKDt4psw4Be9cBbSY,23685
  pyRDDLGym_jax/core/visualization.py,sha256=uKhC8z0TeX9BklPNoxSVt0g5pkqhgxrQClQAih78ybY,68292
+ pyRDDLGym_jax/core/assets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pyRDDLGym_jax/core/assets/favicon.ico,sha256=RMMrI9YvmF81TgYG7FO7UAre6WmYFkV3B2GmbA1l0kM,175085
  pyRDDLGym_jax/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pyRDDLGym_jax/examples/run_gradient.py,sha256=KhXvijRDZ4V7N8NOI2WV8ePGpPna5_vnET61YwS7Tco,2919
  pyRDDLGym_jax/examples/run_gym.py,sha256=rXvNWkxe4jHllvbvU_EOMji_2-2k5d4tbBKhpMm_Gaw,1526
- pyRDDLGym_jax/examples/run_plan.py,sha256=jt-NQG5dByzHC6BFq8wByjGS8StzrRPZAefbOSv5tzU,2582
+ pyRDDLGym_jax/examples/run_plan.py,sha256=v2AvwgIa4Ejr626vBOgWFJIQvay3IPKWno02ztIFCYc,2768
  pyRDDLGym_jax/examples/run_scipy.py,sha256=wvcpWCvdjvYHntO95a7JYfY2fuCMUTKnqjJikW0PnL4,2291
- pyRDDLGym_jax/examples/run_tune.py,sha256=aaJQnL8vIEY67rPAK56Yugo__skHWShHchWieiEKjRU,3574
+ pyRDDLGym_jax/examples/run_tune.py,sha256=zqrhvLR5PeWJv0NsRxDCzAPmvgPgz_1NrtM1xBy6ndU,3606
  pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_drp.cfg,sha256=mE8MqhOlkHeXIGEVrnR3QY6I-_iy4uxFYRA71P1bmtk,347
  pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_replan.cfg,sha256=CI_c-E2Ij2dzVbYFA3sAUEXQBaIDImaEH15HpLqGQRw,370
  pyRDDLGym_jax/examples/configs/Cartpole_Continuous_gym_slp.cfg,sha256=T-O4faHYfSMyNNjY2jQ9XPK772szjbC7Enip5AaEO_0,340
@@ -38,8 +41,9 @@ pyRDDLGym_jax/examples/configs/default_slp.cfg,sha256=mJo0woDevhQCSQfJg30ULVy9qG
  pyRDDLGym_jax/examples/configs/tuning_drp.cfg,sha256=CQMpSCKTkGioO7U82mHMsYWFRsutULx0V6Wrl3YzV2U,504
  pyRDDLGym_jax/examples/configs/tuning_replan.cfg,sha256=m_0nozFg_GVld0tGv92Xao_KONFJDq_vtiJKt5isqI8,501
  pyRDDLGym_jax/examples/configs/tuning_slp.cfg,sha256=KHu8II6CA-h_HblwvWHylNRjSvvGS3VHxN7JQNR4p_Q,464
- pyRDDLGym_jax-1.1.dist-info/LICENSE,sha256=Y0Gi6H6mLOKN-oIKGZulQkoTJyPZeAaeuZu7FXH-meg,1095
- pyRDDLGym_jax-1.1.dist-info/METADATA,sha256=pjZ1hIeg_pbsnC8vwQwh_YeSlrvnUUCViYhYepYpYIM,14615
- pyRDDLGym_jax-1.1.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
- pyRDDLGym_jax-1.1.dist-info/top_level.txt,sha256=n_oWkP_BoZK0VofvPKKmBZ3NPk86WFNvLhi1BktCbVQ,14
- pyRDDLGym_jax-1.1.dist-info/RECORD,,
+ pyRDDLGym_jax-1.3.dist-info/LICENSE,sha256=Y0Gi6H6mLOKN-oIKGZulQkoTJyPZeAaeuZu7FXH-meg,1095
+ pyRDDLGym_jax-1.3.dist-info/METADATA,sha256=Colu-byYJ4RF5sr1qOVKg9VhCbrLnv32OvHt_A9KtLE,15090
+ pyRDDLGym_jax-1.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ pyRDDLGym_jax-1.3.dist-info/entry_points.txt,sha256=Q--z9QzqDBz1xjswPZ87PU-pib-WPXx44hUWAFoBGBA,59
+ pyRDDLGym_jax-1.3.dist-info/top_level.txt,sha256=n_oWkP_BoZK0VofvPKKmBZ3NPk86WFNvLhi1BktCbVQ,14
+ pyRDDLGym_jax-1.3.dist-info/RECORD,,
pyRDDLGym_jax-1.3.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.7.0)
+ Generator: setuptools (75.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
pyRDDLGym_jax-1.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ jaxplan = pyRDDLGym_jax.entry_point:main