gym-examples 3.0.128-py3-none-any.whl → 3.0.130-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.128"
+__version__ = "3.0.130"
gym_examples/envs/wsn_env.py CHANGED
@@ -59,33 +59,10 @@ class WSNRoutingEnv(gym.Env):
         # Initialize the position of the sensors randomly
 
         # Define observation space
-        # self.observation_space = Tuple(
-        #     tuple([self._get_observation_space() for _ in range(self.n_sensors)])
-        # )
-        # self.action_space = Tuple(tuple([Discrete(self.n_sensors + 1)] * self.n_agents))
-
-        # Define the flattened observation space
-        single_sensor_obs_space = self._get_observation_space()
-        obs_low = np.concatenate([
-            single_sensor_obs_space['remaining_energy'].low,
-            single_sensor_obs_space['consumption_energy'].low,
-            single_sensor_obs_space['sensor_positions'].low,
-            single_sensor_obs_space['number_of_packets'].low
-        ])
-        obs_high = np.concatenate([
-            single_sensor_obs_space['remaining_energy'].high,
-            single_sensor_obs_space['consumption_energy'].high,
-            single_sensor_obs_space['sensor_positions'].high,
-            single_sensor_obs_space['number_of_packets'].high
-        ])
-        self.observation_space = Box(
-            low=np.tile(obs_low, self.n_sensors),
-            high=np.tile(obs_high, self.n_sensors),
-            dtype=np.float32
+        self.observation_space = Tuple(
+            tuple([self._get_observation_space() for _ in range(self.n_sensors)])
         )
-
-        # Define the flattened action space
-        self.action_space = MultiDiscrete([self.n_sensors + 1] * self.n_agents)
+        self.action_space = Tuple(tuple([Discrete(self.n_sensors + 1)] * self.n_agents))
 
         self.reset()
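This hunk reverts the flattened spaces introduced in 3.0.128: the tiled `Box` observation and the `MultiDiscrete` action go back to a `Tuple` of per-sensor `Dict` observation spaces and a `Tuple` of `Discrete(n_sensors + 1)` actions, one per agent. Below is a minimal sketch of the restored layout, assuming gym's `spaces` API; `per_sensor_space`, its bounds, and its shapes are hypothetical stand-ins for whatever `_get_observation_space()` actually returns.

```python
import numpy as np
from gym.spaces import Box, Dict, Discrete, Tuple

n_sensors, n_agents = 3, 3  # illustrative values, not from the package

def per_sensor_space():
    # Hypothetical stand-in for WSNRoutingEnv._get_observation_space();
    # the real field bounds and shapes live in wsn_env.py.
    return Dict({
        'remaining_energy': Box(low=0.0, high=np.inf, shape=(1,), dtype=np.float64),
        'consumption_energy': Box(low=0.0, high=np.inf, shape=(1,), dtype=np.float64),
        'sensor_positions': Box(low=0.0, high=100.0, shape=(2,), dtype=np.float64),
        'number_of_packets': Box(low=0.0, high=np.inf, shape=(1,), dtype=np.float64),
    })

# The 3.0.130 layout: one Dict space per sensor, one Discrete action per agent.
observation_space = Tuple(tuple(per_sensor_space() for _ in range(n_sensors)))
action_space = Tuple(tuple(Discrete(n_sensors + 1) for _ in range(n_agents)))

obs = observation_space.sample()   # a tuple of n_sensors dicts
act = action_space.sample()        # a tuple of n_agents ints in [0, n_sensors]
print(len(obs), sorted(obs[0]), act)
```

Per the comment deleted above, the flattened layout existed for Stable Baselines3 compatibility; the structured layout is the more natural fit for per-agent multi-agent training loops.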
@@ -196,7 +173,6 @@ class WSNRoutingEnv(gym.Env):
         return self._get_obs(), rewards, dones, {}
 
     # def _get_obs(self):
-
     #     return [{'remaining_energy': np.array([e]),
     #              'consumption_energy': np.array([initial_energy - e]),
     #              'sensor_positions': p,
@@ -204,19 +180,18 @@ class WSNRoutingEnv(gym.Env):
     #              } for e, p, d in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)]
 
     def _get_obs(self):
-        # Structured observation
-        structured_obs = [{'remaining_energy': np.array([e]),
-                           'consumption_energy': np.array([initial_energy - e]),
-                           'sensor_positions': p,
-                           'number_of_packets': np.array([d])
-                           } for e, p, d in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)]
-
-        # Flattened observation for compatibility with Stable Baselines3
-        flat_obs = np.concatenate([
-            np.concatenate([obs['remaining_energy'], obs['consumption_energy'], obs['sensor_positions'], obs['number_of_packets']])
-            for obs in structured_obs
-        ])
-        return flat_obs
+        remaining_energy = np.array([e for e, _, _ in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)])
+        consumption_energy = np.array([initial_energy - e for e, _, _ in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)])
+        sensor_positions = np.stack(self.sensor_positions, axis=0)
+        number_of_packets = np.array([d for _, _, d in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)])
+
+        return {
+            'remaining_energy': remaining_energy,
+            'consumption_energy': consumption_energy,
+            'sensor_positions': sensor_positions,
+            'number_of_packets': number_of_packets
+        }
+
 
     def _get_observation_space(self):
         return Dict({
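`_get_obs` changes accordingly: 3.0.128 built per-sensor dicts and concatenated them into one 1-D vector (per its own comment, for Stable Baselines3), while 3.0.130 returns a single dict of per-field arrays stacked across sensors. Note that, as written, a single dict does not match the `Tuple`-of-`Dict`s observation space declared in `__init__`. A toy before/after comparison with made-up values (three sensors in 2-D, `initial_energy` assumed to be 1.0):

```python
import numpy as np

initial_energy = 1.0  # assumed value; the package defines its own constant
remaining_energy = [0.9, 0.7, 0.5]
sensor_positions = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
number_of_packets = [4, 0, 2]

# 3.0.128 layout: 5 numbers per sensor, concatenated -> shape (15,)
flat_obs = np.concatenate([
    np.concatenate([[e], [initial_energy - e], p, [d]])
    for e, p, d in zip(remaining_energy, sensor_positions, number_of_packets)
])

# 3.0.130 layout: one dict of per-field arrays
dict_obs = {
    'remaining_energy': np.array(remaining_energy),           # shape (3,)
    'consumption_energy': initial_energy - np.array(remaining_energy),
    'sensor_positions': np.stack(sensor_positions, axis=0),   # shape (3, 2)
    'number_of_packets': np.array(number_of_packets),         # shape (3,)
}

print(flat_obs.shape, dict_obs['sensor_positions'].shape)     # (15,) (3, 2)
```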
gym_examples-3.0.130.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.128
+Version: 3.0.130
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.130.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=ephqHG1V21jR1hU69gk7ByNV2lo-xcz__sSSxL0TRiQ,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=jPME2gvkDL7BfMqYgjpAXna-mTAv3_81HlPs-U5FnTM,22286
+gym_examples-3.0.130.dist-info/METADATA,sha256=28_KBJf-MOIyD0HgbmzMvu1UlAcH5s-o0az8Qd1pN1Y,412
+gym_examples-3.0.130.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.130.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.130.dist-info/RECORD,,
gym_examples-3.0.128.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=HtlSQQsIPVhRkM90RZRf4j2KCKHhI4jQzcBnLzxNT1U,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=cPfhzLLoAMoBonuR7PdgS-OjU9e1AbaJlgRo4AxHTqY,23329
-gym_examples-3.0.128.dist-info/METADATA,sha256=CCcUSZp8Tyavj_Gxy9eWmZH1XJgvrMbItHuVR6BlRXM,412
-gym_examples-3.0.128.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.128.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.128.dist-info/RECORD,,
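The RECORD files are standard wheel manifests: each line is `path,sha256=<digest>,<size>`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash. The `wsn_env.py` entry shrinking from 23329 to 22286 bytes is consistent with the net deletions above. A small sketch of how one such line can be recomputed for verification; the path is only an example:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a RECORD-style line for a file: path,sha256=<digest>,<size>."""
    data = Path(path).read_bytes()
    # Wheel RECORDs use URL-safe base64 with the trailing '=' padding stripped.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"

# Example against a hypothetical local checkout of the wheel's contents:
# print(record_entry("gym_examples/envs/wsn_env.py"))
# -> should match the corresponding RECORD line for an intact 3.0.130 wheel
```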