multi-agent-rlenv 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. multi_agent_rlenv-1.0.0/.github/workflows/ci.yaml +95 -0
  2. multi_agent_rlenv-1.0.0/.gitignore +187 -0
  3. multi_agent_rlenv-1.0.0/LICENSE +21 -0
  4. multi_agent_rlenv-1.0.0/PKG-INFO +50 -0
  5. multi_agent_rlenv-1.0.0/README.md +35 -0
  6. multi_agent_rlenv-1.0.0/pyproject.toml +39 -0
  7. multi_agent_rlenv-1.0.0/src/main.py +0 -0
  8. multi_agent_rlenv-1.0.0/src/marlenv/__init__.py +52 -0
  9. multi_agent_rlenv-1.0.0/src/marlenv/adapters/__init__.py +24 -0
  10. multi_agent_rlenv-1.0.0/src/marlenv/adapters/gym_adapter.py +64 -0
  11. multi_agent_rlenv-1.0.0/src/marlenv/adapters/pettingzoo_adapter.py +58 -0
  12. multi_agent_rlenv-1.0.0/src/marlenv/adapters/pymarl_adapter.py +86 -0
  13. multi_agent_rlenv-1.0.0/src/marlenv/adapters/smac_adapter.py +59 -0
  14. multi_agent_rlenv-1.0.0/src/marlenv/env_builder.py +161 -0
  15. multi_agent_rlenv-1.0.0/src/marlenv/exceptions.py +6 -0
  16. multi_agent_rlenv-1.0.0/src/marlenv/mock_env.py +63 -0
  17. multi_agent_rlenv-1.0.0/src/marlenv/models/__init__.py +20 -0
  18. multi_agent_rlenv-1.0.0/src/marlenv/models/episode.py +240 -0
  19. multi_agent_rlenv-1.0.0/src/marlenv/models/observation.py +67 -0
  20. multi_agent_rlenv-1.0.0/src/marlenv/models/rl_env.py +134 -0
  21. multi_agent_rlenv-1.0.0/src/marlenv/models/spaces.py +137 -0
  22. multi_agent_rlenv-1.0.0/src/marlenv/models/transition.py +75 -0
  23. multi_agent_rlenv-1.0.0/src/marlenv/py.typed +0 -0
  24. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/__init__.py +27 -0
  25. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/agent_id_wrapper.py +23 -0
  26. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/available_actions_mask.py +28 -0
  27. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/available_actions_wrapper.py +24 -0
  28. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/blind_wrapper.py +26 -0
  29. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/centralised.py +61 -0
  30. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/last_action_wrapper.py +32 -0
  31. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/paddings.py +47 -0
  32. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/penalty_wrapper.py +18 -0
  33. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/rlenv_wrapper.py +70 -0
  34. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/time_limit.py +75 -0
  35. multi_agent_rlenv-1.0.0/src/marlenv/wrappers/video_recorder.py +61 -0
  36. multi_agent_rlenv-1.0.0/tests/__init__.py +0 -0
  37. multi_agent_rlenv-1.0.0/tests/test_adapters.py +149 -0
  38. multi_agent_rlenv-1.0.0/tests/test_episode.py +187 -0
  39. multi_agent_rlenv-1.0.0/tests/test_models.py +207 -0
  40. multi_agent_rlenv-1.0.0/tests/test_serialization.py +36 -0
  41. multi_agent_rlenv-1.0.0/tests/test_spaces.py +120 -0
  42. multi_agent_rlenv-1.0.0/tests/test_wrappers.py +237 -0
multi_agent_rlenv-1.0.0/.github/workflows/ci.yaml
@@ -0,0 +1,95 @@
+ name: Build & Test
+
+ on:
+   push:
+     branches:
+       - main
+       - master
+       - dev
+     tags:
+       - '*'
+   pull_request:
+   workflow_dispatch:
+
+ permissions:
+   contents: read
+
+ jobs:
+   test:
+     name: 🧪 Test
+     strategy:
+       matrix:
+         os:
+           - ubuntu-latest
+           - windows-latest
+           - macOS-latest
+         target:
+           - x86_64
+           - aarch64
+         python-version:
+           - '3.10'
+           - '3.11'
+           - '3.12'
+     runs-on: ${{ matrix.os }}
+     steps:
+       # Checkout the repository
+       - name: Checkout
+         uses: actions/checkout@v4
+       - name: Python setup
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install uv
+         uses: yezz123/setup-uv@v4
+         with:
+           uv-version: 0.3.0
+       - name: Install dependencies and run pytest
+         run: |
+           uv sync
+           uv run pytest -n 4
+
+   build:
+     name: 📦 Build package
+     if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
+     needs: test
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: 3.12
+       - name: Install UV
+         uses: yezz123/setup-uv@v4
+         with:
+           uv-version: 0.3.0
+       - name: Build wheels
+         run: |
+           uv venv
+           uv pip install pip build
+           uv run python -m build
+
+       - name: 📡 Upload package distributions
+         uses: actions/upload-artifact@v3
+         with:
+           name: wheels
+           path: dist/
+
+
+   publish-to-pypi:
+     name: 📤 Publish to PyPI
+     needs: build
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+       url: https://pypi.org/p/multi-agent-rlenv
+     permissions:
+       id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
+     steps:
+       - name: 📡 Download package distributions
+         uses: actions/download-artifact@v3
+         with:
+           name: wheels
+           path: dist/
+       - name: 📤 Publish 📦 to PyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
multi_agent_rlenv-1.0.0/.gitignore
@@ -0,0 +1,187 @@
+
+ # Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode
+
+ ### Python ###
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ ./env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ ### VisualStudioCode ###
+ .vscode
+
+
+ # Local History for Visual Studio Code
+ .history/
+
+ # Built Visual Studio Code Extensions
+ *.vsix
+
+ ### VisualStudioCode Patch ###
+ # Ignore all local history of files
+ .history
+ .ionide
+
+ # Support for Project snippet scope
+
+ # End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
+
+ # Do not push nohup logs
+ nohup.out
+ # Do not push Docker configuration
+ .devcontainer
+ # Do not push Tensorboard reports
+ runs
+ logs
+ data/game_images/*.png
+ poetry.lock
+ uv.lock
multi_agent_rlenv-1.0.0/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 Yannick Molinghen
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
multi_agent_rlenv-1.0.0/PKG-INFO
@@ -0,0 +1,50 @@
+ Metadata-Version: 2.3
+ Name: multi-agent-rlenv
+ Version: 1.0.0
+ Summary: A strongly typed Multi-Agent Reinforcement Learning framework
+ Project-URL: repository, https://github.com/yamoling/ma-rlenv
+ Author-email: Yannick Molinghen <yannick.molinghen@ulb.be>
+ License-File: LICENSE
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: <4,>=3.10
+ Requires-Dist: gymnasium>=0.29.1
+ Requires-Dist: numpy>=2.0.0
+ Requires-Dist: opencv-python>=4.10.0.84
+ Description-Content-Type: text/markdown
+
+ # RLEnv: yet another RL framework
+ This framework provides high-level abstractions of RL models, so that algorithms can be built on top of it.
+
+ ## Designing an environment
+ To create an environment that is compatible with RLEnv, inherit from the `RLEnv` class.
+
+ ## Instantiating an environment
+ ### Simple environments
+ ```python
+ import marlenv as menv
+ print(menv.__version__)
+
+ # From Gym
+ env = menv.make("CartPole-v1")
+
+ # From pettingzoo
+ from pettingzoo.sisl import pursuit_v4
+ env = menv.make(pursuit_v4.parallel_env())
+ ```
+
+ ### Adding extra information to the observations
+ ```python
+ import marlenv as menv
+ # Building the environment with additional information
+ from pettingzoo.sisl import pursuit_v4
+ env = menv.Builder(pursuit_v4.parallel_env())\
+     .with_agent_id()\
+     .with_last_action()\
+     .build()
+ # 8 agents + 5 actions = 13 extras
+ assert env.extra_feature_shape == (13, )
+ ```
+
+ # Related projects
+ - MARL: a multi-agent reinforcement learning framework [https://github.com/yamoling/marl](https://github.com/yamoling/marl)
multi_agent_rlenv-1.0.0/README.md
@@ -0,0 +1,35 @@
+ # RLEnv: yet another RL framework
+ This framework provides high-level abstractions of RL models, so that algorithms can be built on top of it.
+
+ ## Designing an environment
+ To create an environment that is compatible with RLEnv, inherit from the `RLEnv` class.
+
+ ## Instantiating an environment
+ ### Simple environments
+ ```python
+ import marlenv as menv
+ print(menv.__version__)
+
+ # From Gym
+ env = menv.make("CartPole-v1")
+
+ # From pettingzoo
+ from pettingzoo.sisl import pursuit_v4
+ env = menv.make(pursuit_v4.parallel_env())
+ ```
+
+ ### Adding extra information to the observations
+ ```python
+ import marlenv as menv
+ # Building the environment with additional information
+ from pettingzoo.sisl import pursuit_v4
+ env = menv.Builder(pursuit_v4.parallel_env())\
+     .with_agent_id()\
+     .with_last_action()\
+     .build()
+ # 8 agents + 5 actions = 13 extras
+ assert env.extra_feature_shape == (13, )
+ ```
+
+ # Related projects
+ - MARL: a multi-agent reinforcement learning framework [https://github.com/yamoling/marl](https://github.com/yamoling/marl)
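The README's "Designing an environment" section says to subclass `RLEnv` but this release ships no example of doing so. The sketch below is inferred from the adapters included in the package (the `super().__init__(space, observation_shape, state_shape)` call, `Observation(data, available_actions, state)`, and the `step`/`reset`/`get_state` methods visible in `gym_adapter.py` and `pettingzoo_adapter.py`); the exact set of methods `RLEnv` requires is an assumption, and the environment itself is purely illustrative.

```python
# Minimal sketch of a custom RLEnv subclass, modelled on the adapters in this release.
# The constructor arguments and required overrides are inferred, not documented API.
import numpy as np

from marlenv import RLEnv, Observation, DiscreteActionSpace


class TwoAgentToyEnv(RLEnv[DiscreteActionSpace]):  # hypothetical environment
    def __init__(self):
        # 2 agents, 3 discrete actions each, per-agent observation shape (4,), state shape (1,)
        super().__init__(DiscreteActionSpace(2, 3), (4,), (1,))
        self._t = 0

    def get_state(self):
        return np.array([self._t], dtype=np.float32)

    def reset(self):
        self._t = 0
        obs_data = np.zeros((2, 4), dtype=np.float32)
        return Observation(obs_data, self.available_actions(), self.get_state())

    def step(self, actions):
        self._t += 1
        obs = Observation(np.random.rand(2, 4).astype(np.float32), self.available_actions(), self.get_state())
        reward = np.array([1.0], dtype=np.float32)  # single team reward, as in the adapters
        done = self._t >= 20
        return obs, reward, done, False, {}
```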
multi_agent_rlenv-1.0.0/pyproject.toml
@@ -0,0 +1,39 @@
+ [project]
+ name = "multi-agent-rlenv"
+ version = "1.0.0"
+ description = "A strongly typed Multi-Agent Reinforcement Learning framework"
+ authors = [
+     { "name" = "Yannick Molinghen", "email" = "yannick.molinghen@ulb.be" },
+ ]
+ readme = "README.md"
+ requires-python = ">=3.10, <4"
+ dependencies = ["numpy>=2.0.0", "opencv-python>=4.10.0.84", "gymnasium>=0.29.1"]
+ urls = { "repository" = "https://github.com/yamoling/ma-rlenv" }
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+ ]
+
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.ruff]
+ line-length = 140
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/marlenv"]
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.uv]
+ dev-dependencies = ["pytest>=8.3.2", "pytest-xdist>=3.6.1"]
+
+
+ [tool.pytest.ini_options]
+ # Run tests on the maximum number of available CPUs
+ addopts = "-n auto"
+ testpaths = ["tests"]
+ pythonpath = "src"
multi_agent_rlenv-1.0.0/src/main.py
File without changes
multi_agent_rlenv-1.0.0/src/marlenv/__init__.py
@@ -0,0 +1,52 @@
+ """
+ RLEnv is a strongly typed library for multi-agent and multi-objective reinforcement learning.
+
+ RLEnv
+ - provides a simple and consistent interface for reinforcement learning environments
+ - provides fundamental models such as `Observation`s, `Episode`s, `Transition`s, ...
+ - works with gymnasium, pettingzoo and SMAC out of the box
+ - provides helpful wrappers to add intrinsic rewards, agent ids, record videos, ...
+ """
+
+ __version__ = "1.0.4"
+
+ from . import models
+ from . import wrappers
+ from . import adapters
+ from .models import spaces
+
+
+ from .env_builder import make, Builder
+ from .models import (
+     RLEnv,
+     Observation,
+     Episode,
+     EpisodeBuilder,
+     Transition,
+     DiscreteSpace,
+     ContinuousSpace,
+     ActionSpace,
+     DiscreteActionSpace,
+     ContinuousActionSpace,
+ )
+ from .mock_env import MockEnv
+
+ __all__ = [
+     "models",
+     "wrappers",
+     "adapters",
+     "spaces",
+     "make",
+     "Builder",
+     "RLEnv",
+     "Observation",
+     "Episode",
+     "EpisodeBuilder",
+     "Transition",
+     "ActionSpace",
+     "DiscreteSpace",
+     "ContinuousSpace",
+     "DiscreteActionSpace",
+     "ContinuousActionSpace",
+     "MockEnv",
+ ]
multi_agent_rlenv-1.0.0/src/marlenv/adapters/__init__.py
@@ -0,0 +1,24 @@
+ from .pymarl_adapter import PymarlAdapter
+ from typing import Any
+
+ __all__ = ["PymarlAdapter"]
+ try:
+     from .gym_adapter import Gym
+
+     __all__.append("Gym")
+ except ImportError:
+     Gym = Any
+
+ try:
+     from .pettingzoo_adapter import PettingZoo
+
+     __all__.append("PettingZoo")
+ except ImportError:
+     PettingZoo = Any
+
+ try:
+     from .smac_adapter import SMAC
+
+     __all__.append("SMAC")
+ except ImportError:
+     SMAC = Any
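The try/except pattern above makes each adapter optional: when its backing library is missing, the import fails silently and the exported name degrades to `typing.Any` instead of the adapter class. A small, hypothetical downstream check, relying only on the `__all__` bookkeeping shown above:

```python
# Hypothetical availability check: an adapter name is appended to __all__ only when
# its optional dependency imported successfully (see the try/except blocks above).
from marlenv import adapters

for name in ("Gym", "PettingZoo", "SMAC"):
    status = "available" if name in adapters.__all__ else "missing optional dependency"
    print(f"{name}: {status}")
```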
multi_agent_rlenv-1.0.0/src/marlenv/adapters/gym_adapter.py
@@ -0,0 +1,64 @@
+ from gymnasium import Env, spaces
+ import numpy as np
+
+ from marlenv.models import (
+     RLEnv,
+     Observation,
+     ActionSpace,
+     DiscreteSpace,
+     ContinuousSpace,
+ )
+
+
+ class Gym(RLEnv[ActionSpace]):
+     """Wraps a gym environment in an RLEnv"""
+
+     def __init__(self, env: Env):
+         if env.observation_space.shape is None:
+             raise NotImplementedError("Observation space must have a shape")
+         match env.action_space:
+             case spaces.Discrete() as s:
+                 space = ActionSpace(1, DiscreteSpace(int(s.n)))
+             case spaces.Box() as s:
+                 low = s.low.astype(np.float32)
+                 high = s.high.astype(np.float32)
+                 if not isinstance(low, np.ndarray):
+                     low = np.full(s.shape, s.low, dtype=np.float32)
+                 if not isinstance(high, np.ndarray):
+                     high = np.full(s.shape, s.high, dtype=np.float32)
+                 space = ActionSpace(1, ContinuousSpace(low=low, high=high))
+             case other:
+                 raise NotImplementedError(f"Action space {other} not supported")
+         super().__init__(space, env.observation_space.shape, (1,))
+         self.env = env
+         if self.env.unwrapped.spec is not None:
+             self.name = self.env.unwrapped.spec.id
+         else:
+             self.name = "gym-no-id"
+
+     def step(self, actions):
+         obs_, reward, done, truncated, info = self.env.step(list(actions)[0])
+         obs_ = Observation(
+             np.array([obs_], dtype=np.float32),
+             self.available_actions(),
+             self.get_state(),
+         )
+         return obs_, np.array([reward], dtype=np.float32), done, truncated, info
+
+     def get_state(self):
+         return np.zeros(1, dtype=np.float32)
+
+     def reset(self):
+         obs_data, _info = self.env.reset()
+         obs = Observation(
+             np.array([obs_data], dtype=np.float32),
+             self.available_actions(),
+             self.get_state(),
+         )
+         return obs
+
+     def render(self, mode: str = "human"):
+         return self.env.render()
+
+     def seed(self, seed_value: int):
+         self.env.reset(seed=seed_value)
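A short usage sketch for the adapter above. It assumes gymnasium is installed and follows the single-agent conventions visible in the code (one action, a one-element reward array, a dummy state); `marlenv.make("CartPole-v1")` from the README should produce an equivalent wrapper.

```python
# Sketch: wrapping a gymnasium environment with the Gym adapter shown above.
import gymnasium as gym
from marlenv.adapters import Gym

env = Gym(gym.make("CartPole-v1"))
obs = env.reset()                                   # Observation with a (1, 4) data array
obs, reward, done, truncated, info = env.step([0])  # one action for the single agent
print(env.name, reward, done, truncated)
```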
multi_agent_rlenv-1.0.0/src/marlenv/adapters/pettingzoo_adapter.py
@@ -0,0 +1,58 @@
+ from pettingzoo import ParallelEnv
+ from gymnasium import spaces  # pettingzoo uses gymnasium spaces
+ from marlenv.models import RLEnv, Observation, ActionSpace, DiscreteActionSpace, ContinuousActionSpace, ContinuousSpace
+ import numpy as np
+ import numpy.typing as npt
+
+
+ class PettingZoo(RLEnv[ActionSpace]):
+     def __init__(self, env: ParallelEnv):
+         env.reset()
+         aspace = env.action_space(env.possible_agents[0])
+
+         match aspace:
+             case spaces.Discrete() as s:
+                 space = DiscreteActionSpace(env.num_agents, int(s.n))
+
+             case spaces.Box() as s:
+                 low = s.low.astype(np.float32)
+                 high = s.high.astype(np.float32)
+                 if not isinstance(low, np.ndarray):
+                     low = np.full(s.shape, s.low, dtype=np.float32)
+                 if not isinstance(high, np.ndarray):
+                     high = np.full(s.shape, s.high, dtype=np.float32)
+                 space = ContinuousActionSpace(env.num_agents, low, high=high)
+             case other:
+                 raise NotImplementedError(f"Action space {other} not supported")
+
+         obs_space = env.observation_space(env.possible_agents[0])
+         if obs_space.shape is None:
+             raise NotImplementedError("Only discrete observation spaces are supported")
+         self._env = env
+         super().__init__(space, obs_space.shape, self.get_state().shape)
+         self.agents = env.possible_agents
+
+     def get_state(self):
+         try:
+             return self._env.state()
+         except NotImplementedError:
+             return np.array([0])
+
+     def step(self, actions: npt.NDArray[np.int64]):
+         action_dict = dict(zip(self.agents, actions))
+         obs, reward, term, trunc, info = self._env.step(action_dict)
+         obs_data = np.array([v for v in obs.values()])
+         reward = np.sum([r for r in reward.values()], keepdims=True)
+         observation = Observation(obs_data, self.available_actions(), self.get_state())
+         return observation, reward, any(term.values()), any(trunc.values()), info
+
+     def reset(self) -> Observation:
+         obs = self._env.reset()[0]
+         obs_data = np.array([v for v in obs.values()])
+         return Observation(obs_data, self.available_actions(), self.get_state())
+
+     def seed(self, seed_value: int):
+         self._env.reset(seed=seed_value)
+
+     def render(self, *_):
+         return self._env.render()
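And a matching sketch for the PettingZoo adapter, using the `pursuit_v4` environment from the README (requires `pettingzoo[sisl]`). As the code above shows, the adapter zips one action per agent with `possible_agents` and sums the per-agent rewards into a single team reward.

```python
# Sketch: driving a PettingZoo parallel environment through the adapter above.
import numpy as np
from pettingzoo.sisl import pursuit_v4
from marlenv.adapters import PettingZoo

env = PettingZoo(pursuit_v4.parallel_env())
obs = env.reset()
actions = np.zeros(len(env.agents), dtype=np.int64)  # one discrete action per agent
obs, reward, done, truncated, info = env.step(actions)
print(reward, done, truncated)
```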