gr-libs 0.1.5__py3-none-any.whl → 0.1.6.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
CI/README.md ADDED
@@ -0,0 +1,12 @@
1
+ ## How to build a new docker image including new trained agents:
2
+ 1. Install docker
3
+ 2. Make sure you have a dataset.zip at your repo root
4
+ 3. Make sure you have a classic token on GitHub: https://github.com/settings/tokens . If you don't have one, create one with package write, read and delete permissions and copy it somewhere safe.
5
+ 4. Authenticate to ghcr with docker by running:
6
+ ```sh
7
+ echo ghp_REST_OF_TOKEN | docker login ghcr.io -u MatanShamir1 --password-stdin
8
+ ```
9
+ 5. docker build -t ghcr.io/<your-username>/gr_test_base:latest -f CI/Dockerfile .
10
+ (the -f flag tells docker which Dockerfile to use, and the '.' sets the build context, i.e. the directory where dataset.zip should live)
11
+ 6. docker push ghcr.io/<your-username>/gr_test_base:latest
12
+ docker push ghcr.io/MatanShamir1/gr_test_base:latest
@@ -0,0 +1,15 @@
1
+ FROM python:3.11-slim
2
+
3
+ # Set workdir
4
+ WORKDIR /app
5
+
6
+ # Install unzip
7
+ RUN apt-get update && apt-get install -y unzip && rm -rf /var/lib/apt/lists/*
8
+
9
+ # Copy and unzip the dataset
10
+ COPY dataset.zip .
11
+ RUN unzip dataset.zip && rm dataset.zip
12
+ RUN mv dataset_new dataset
13
+
14
+ # Just start with bash by default
15
+ CMD [ "bash" ]
gr_libs/_version.py ADDED
@@ -0,0 +1,21 @@
1
+ # file generated by setuptools-scm
2
+ # don't change, don't track in version control
3
+
4
+ __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
5
+
6
+ TYPE_CHECKING = False
7
+ if TYPE_CHECKING:
8
+ from typing import Tuple
9
+ from typing import Union
10
+
11
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
12
+ else:
13
+ VERSION_TUPLE = object
14
+
15
+ version: str
16
+ __version__: str
17
+ __version_tuple__: VERSION_TUPLE
18
+ version_tuple: VERSION_TUPLE
19
+
20
+ __version__ = version = '0.1.6.post1'
21
+ __version_tuple__ = version_tuple = (0, 1, 6)
@@ -12,11 +12,11 @@ def is_extra_installed(package: str, extra: str) -> bool:
12
12
  return False # The package is not installed
13
13
 
14
14
  # Check if `gr_libs[minigrid]` was installed
15
- for env in ["minigrid", "panda", "parking", "point_maze"]:
15
+ for env in ["minigrid", "panda", "highway", "point_maze"]:
16
16
  if is_extra_installed("gr_libs", f"gr_envs[{env}]"):
17
17
  try:
18
18
  importlib.import_module(f"gr_envs.{env}_scripts.envs")
19
19
  except ImportError:
20
- raise ImportError(f"gr_libs[{env}] was not installed, but gr_libs[{env}] requires it! if you messed with gr_libs installation, you can reinstall gr_libs.")
20
+ raise ImportError(f"gr_envs[{env}] was not installed, but gr_libs[{env}] requires it! if you messed with gr_envs installation, you can reinstall gr_libs.")
21
21
  else:
22
22
  warnings.warn(f"gr_libs[{env}] was not installed, skipping {env} imports.", RuntimeWarning)
@@ -105,7 +105,7 @@ class MinigridProperty(EnvProperty):
105
105
  env_id = problem_name.split("-DynamicGoal-")[0] + "-DynamicGoal-" + problem_name.split("-DynamicGoal-")[1]
106
106
  result = register(
107
107
  id=env_id,
108
- entry_point="gr_libss.minigrid_scripts.envs:CustomColorEnv",
108
+ entry_point="gr_envs.minigrid_scripts.envs:CustomColorEnv",
109
109
  kwargs={"size": 13 if 'Simple' in problem_name else 9,
110
110
  "num_crossings": 4 if 'Simple' in problem_name else 3,
111
111
  "goal_pos": self.str_to_goal(problem_name),
@@ -5,7 +5,6 @@ import numpy as np
5
5
 
6
6
  from typing import Callable, Generator, List, Dict, Tuple, Any
7
7
  from math import log2
8
- from numpy.core.fromnumeric import mean
9
8
  from scipy.stats import wasserstein_distance
10
9
  from gymnasium.spaces.discrete import Discrete
11
10
  # import torch
@@ -43,7 +42,7 @@ def kl_divergence_norm_softmax(observations: List[Tuple[State, Any]], agent, act
43
42
  qp2_flatten_distribution_list: List[float] = agent.get_actions_probabilities(
44
43
  observation=(observation, agent_pos))
45
44
  distances.append(kl_divergence(qp1, qp2_flatten_distribution_list))
46
- return mean(distances)
45
+ return np.mean(distances)
47
46
 
48
47
 
49
48
  def amplify(values, alpha=1.0):
File without changes