multi-agent-rlenv 3.6.1__tar.gz → 3.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/PKG-INFO +1 -1
  2. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/pyproject.toml +1 -1
  3. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/catalog/deepsea.py +1 -1
  4. multi_agent_rlenv-3.6.2/tests/test_deepsea.py +43 -0
  5. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/.github/workflows/ci.yaml +0 -0
  6. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/.github/workflows/docs.yaml +0 -0
  7. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/.gitignore +0 -0
  8. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/LICENSE +0 -0
  9. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/README.md +0 -0
  10. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/__init__.py +0 -0
  11. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/adapters/__init__.py +0 -0
  12. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/adapters/gym_adapter.py +0 -0
  13. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/adapters/pettingzoo_adapter.py +0 -0
  14. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/adapters/pymarl_adapter.py +0 -0
  15. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/adapters/smac_adapter.py +0 -0
  16. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/catalog/__init__.py +0 -0
  17. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/env_builder.py +0 -0
  18. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/env_pool.py +0 -0
  19. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/exceptions.py +0 -0
  20. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/mock_env.py +0 -0
  21. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/__init__.py +0 -0
  22. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/env.py +0 -0
  23. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/episode.py +0 -0
  24. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/observation.py +0 -0
  25. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/spaces.py +0 -0
  26. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/state.py +0 -0
  27. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/step.py +0 -0
  28. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/models/transition.py +0 -0
  29. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/py.typed +0 -0
  30. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/utils/__init__.py +0 -0
  31. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/utils/cached_property_collector.py +0 -0
  32. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/utils/import_placeholders.py +0 -0
  33. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/utils/schedule.py +0 -0
  34. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/__init__.py +0 -0
  35. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/action_randomizer.py +0 -0
  36. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/agent_id_wrapper.py +0 -0
  37. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/available_actions_mask.py +0 -0
  38. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/available_actions_wrapper.py +0 -0
  39. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/blind_wrapper.py +0 -0
  40. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/centralised.py +0 -0
  41. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/delayed_rewards.py +0 -0
  42. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/last_action_wrapper.py +0 -0
  43. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/paddings.py +0 -0
  44. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/penalty_wrapper.py +0 -0
  45. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/potential_shaping.py +0 -0
  46. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/rlenv_wrapper.py +0 -0
  47. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/time_limit.py +0 -0
  48. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/src/marlenv/wrappers/video_recorder.py +0 -0
  49. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/__init__.py +0 -0
  50. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_adapters.py +0 -0
  51. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_catalog.py +0 -0
  52. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_episode.py +0 -0
  53. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_models.py +0 -0
  54. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_others.py +0 -0
  55. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_pool.py +0 -0
  56. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_schedules.py +0 -0
  57. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_serialization.py +0 -0
  58. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_spaces.py +0 -0
  59. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/test_wrappers.py +0 -0
  60. {multi_agent_rlenv-3.6.1 → multi_agent_rlenv-3.6.2}/tests/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: multi-agent-rlenv
3
- Version: 3.6.1
3
+ Version: 3.6.2
4
4
  Summary: A strongly typed Multi-Agent Reinforcement Learning framework
5
5
  Project-URL: repository, https://github.com/yamoling/multi-agent-rlenv
6
6
  Author-email: Yannick Molinghen <yannick.molinghen@ulb.be>
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "multi-agent-rlenv"
3
- version = "3.6.1"
3
+ version = "3.6.2"
4
4
  description = "A strongly typed Multi-Agent Reinforcement Learning framework"
5
5
  authors = [
6
6
  { "name" = "Yannick Molinghen", "email" = "yannick.molinghen@ulb.be" },
@@ -34,7 +34,7 @@ class DeepSea(MARLEnv[MultiDiscreteSpace]):
34
34
  self._step_right_penalty = -0.01 / self.max_depth
35
35
 
36
36
  def get_observation(self) -> Observation:
37
- return Observation(np.array([self._row, self._col], dtype=np.float32), self.available_actions())
37
+ return Observation(np.array([[self._row, self._col]], dtype=np.float32), self.available_actions())
38
38
 
39
39
  def get_state(self) -> State:
40
40
  return State(np.array([self._row, self._col], dtype=np.float32))
@@ -0,0 +1,43 @@
1
+ from marlenv.catalog.deepsea import DeepSea, LEFT, RIGHT
2
+
3
+
4
+ def test_env():
5
+ env = DeepSea(20)
6
+ assert env.n_actions == 2
7
+ assert env.action_space.is_discrete
8
+ assert env.observation_shape == (2,)
9
+ assert env.n_agents == 1
10
+
11
+
12
+ def test_reset():
13
+ env = DeepSea(20)
14
+ obs, state = env.reset()
15
+ assert obs.shape == (2,)
16
+ assert state.shape == (2,)
17
+
18
+ assert obs.data[0][0] == 0
19
+ assert state.data[0] == 0
20
+ assert obs.data[0][1] == 0
21
+ assert state.data[1] == 0
22
+
23
+
24
+ def test_step():
25
+ env = DeepSea(20)
26
+ env.reset()
27
+ step = env.step([RIGHT])
28
+ obs = step.obs
29
+ assert obs.data[0][0] == 1
30
+ assert obs.data[0][1] == 1
31
+ assert step.reward.item() < 0
32
+
33
+ step = env.step([LEFT])
34
+ obs = step.obs
35
+ assert obs.data[0][0] == 2
36
+ assert obs.data[0][1] == 0
37
+ assert step.reward.item() == 0.0
38
+
39
+ step = env.step([LEFT])
40
+ obs = step.obs
41
+ assert obs.data[0][0] == 3
42
+ assert obs.data[0][1] == 0
43
+ assert step.reward.item() == 0.0