gym-examples 3.0.136__py3-none-any.whl → 3.0.138__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.136"
+__version__ = "3.0.138"

gym_examples/envs/wsn_env.py CHANGED
@@ -59,21 +59,11 @@ class WSNRoutingEnv(gym.Env):
         # Initialize the position of the sensors randomly
 
         # Define observation space
-        # self.observation_space = Tuple(
-        #     tuple([self._get_observation_space() for _ in range(self.n_sensors)])
-        # )
+        self.observation_space = Tuple(
+            tuple([self._get_observation_space() for _ in range(self.n_sensors)])
+        )
 
-        # self.observation_space = gym.spaces.Dict({
-        #     'remaining_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(self.n_agents,), dtype=np.float32),
-        #     'consumption_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(self.n_agents,), dtype=np.float32),
-        #     'sensor_positions': gym.spaces.Box(low=lower_bound, high=upper_bound, shape=(self.n_agents, 2), dtype=np.float32),
-        #     'number_of_packets': gym.spaces.Box(low=0, high=self.n_sensors * initial_number_of_packets + 1, shape=(self.n_agents,), dtype=int)
-        # })
-
-        self.observation_space = self._get_observation_space()
-
-        # self.action_space = Tuple(tuple([Discrete(self.n_sensors + 1)] * self.n_agents))
-        self.action_space = gym.spaces.MultiDiscrete([self.n_sensors + 1] * self.n_agents)
+        self.action_space = Tuple(tuple([Discrete(self.n_sensors + 1)] * self.n_agents))
 
         self.reset()
 
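For orientation, here is a minimal sketch (not part of the package) of what the 3.0.138 space definitions above amount to, built directly from gym.spaces. The constants initial_energy, lower_bound, upper_bound, initial_number_of_packets and the sensor count are placeholder values, since the diff does not show them:

```python
import numpy as np
from gym.spaces import Box, Dict, Discrete, Tuple

# Placeholder values for module-level constants referenced in the diff but not
# shown by it (initial_energy, lower_bound, upper_bound, initial_number_of_packets).
initial_energy = 1.0
lower_bound, upper_bound = 0.0, 100.0
initial_number_of_packets = 1
n_sensors = 4
n_agents = n_sensors

# Per-sensor Dict space, mirroring _get_observation_space() in 3.0.138.
per_sensor_space = Dict({
    'remaining_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
    'consumption_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
    'sensor_positions': Box(low=lower_bound, high=upper_bound, shape=(2,), dtype=np.float64),
    'number_of_packets': Box(low=0, high=n_sensors * initial_number_of_packets + 1, shape=(1,), dtype=int),
})

# 3.0.138: one Dict per sensor wrapped in a Tuple, plus a Tuple of Discrete actions.
observation_space = Tuple(tuple(per_sensor_space for _ in range(n_sensors)))
action_space = Tuple(tuple([Discrete(n_sensors + 1)] * n_agents))

obs = observation_space.sample()   # tuple of n_sensors dicts
act = action_space.sample()        # tuple of n_agents ints in [0, n_sensors]
assert observation_space.contains(obs) and action_space.contains(act)
```

Compared with 3.0.136, which exposed a single flat observation space and a MultiDiscrete action, the Tuple forms yield one Dict observation per sensor and one Discrete choice per agent.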
@@ -183,45 +173,20 @@ class WSNRoutingEnv(gym.Env):
 
         return self._get_obs(), rewards, dones, {}
 
-    # def _get_obs(self):
-    #     return [{'remaining_energy': np.array([e]),
-    #             'consumption_energy': np.array([initial_energy - e]),
-    #             'sensor_positions': p,
-    #             'number_of_packets': np.array([d])
-    #             } for e, p, d in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)]
-
-    # def _get_observation_space(self):
-    #     return Dict({
-    #         'remaining_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
-    #         'consumption_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
-    #         'sensor_positions': Box(low=lower_bound, high=upper_bound, shape=(2,), dtype=np.float64),
-    #         'number_of_packets': Box(low=0, high=self.n_sensors * initial_number_of_packets + 1, shape=(1,), dtype=int)
-    #     })
-
     def _get_obs(self):
-        return {
-            'remaining_energy': np.array(self.remaining_energy),
-            'consumption_energy': np.array([initial_energy - e for e in self.remaining_energy]),
-            'sensor_positions': np.stack(self.sensor_positions, axis=0),
-            'number_of_packets': np.array(self.number_of_packets)
-        }
-
-    # def _get_observation_space(self):
-    #     return gym.spaces.Dict({
-    #         'remaining_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(len(self.remaining_energy),), dtype=np.float32),
-    #         'consumption_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(len(self.remaining_energy),), dtype=np.float32),
-    #         'sensor_positions': gym.spaces.Box(low=lower_bound, high=upper_bound, shape=(len(self.sensor_positions), 2), dtype=np.float32),
-    #         'number_of_packets': gym.spaces.Box(low=0, high=self.n_sensors * initial_number_of_packets + 1, shape=(len(self.number_of_packets),), dtype=int)
-    #     })
+        return [{'remaining_energy': np.array([e]),
+                 'consumption_energy': np.array([initial_energy - e]),
+                 'sensor_positions': p,
+                 'number_of_packets': np.array([d])
+                 } for e, p, d in zip(self.remaining_energy, self.sensor_positions, self.number_of_packets)]
 
     def _get_observation_space(self):
-        return gym.spaces.Dict({
-            'remaining_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(self.n_agents,), dtype=np.float32),
-            'consumption_energy': gym.spaces.Box(low=0, high=initial_energy, shape=(self.n_agents,), dtype=np.float32),
-            'sensor_positions': gym.spaces.Box(low=lower_bound, high=upper_bound, shape=(self.n_agents, 2), dtype=np.float32),
-            'number_of_packets': gym.spaces.Box(low=0, high=self.n_sensors * initial_number_of_packets + 1, shape=(self.n_agents,), dtype=int)
-        })
-
+        return Dict({
+            'remaining_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
+            'consumption_energy': Box(low=0, high=initial_energy, shape=(1,), dtype=np.float64),
+            'sensor_positions': Box(low=lower_bound, high=upper_bound, shape=(2,), dtype=np.float64),
+            'number_of_packets': Box(low=0, high=self.n_sensors * initial_number_of_packets + 1, shape=(1,), dtype=int)
+        })
 
     def get_state(self):
         return self._get_obs()
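Similarly, a small sketch of the observation produced by the 3.0.138 `_get_obs()` above: a list with one dict per sensor rather than the single stacked dict returned in 3.0.136. The per-sensor state values below are made up for illustration:

```python
import numpy as np

# Made-up per-sensor state standing in for self.remaining_energy,
# self.sensor_positions and self.number_of_packets in the class above.
initial_energy = 1.0
remaining_energy = np.array([0.8, 0.5, 0.9])
sensor_positions = [np.array([1.0, 2.0]), np.array([3.5, 0.5]), np.array([2.0, 4.0])]
number_of_packets = np.array([1, 0, 2])

# Mirrors the 3.0.138 _get_obs(): a list with one dict per sensor, instead of
# the single stacked dict that 3.0.136 returned.
obs = [{'remaining_energy': np.array([e]),
        'consumption_energy': np.array([initial_energy - e]),
        'sensor_positions': p,
        'number_of_packets': np.array([d])}
       for e, p, d in zip(remaining_energy, sensor_positions, number_of_packets)]

assert len(obs) == len(remaining_energy)          # one entry per sensor
assert np.isclose(obs[0]['consumption_energy'][0],
                  initial_energy - remaining_energy[0])
```

Each element of this list is shaped to fit the per-sensor Dict space returned by `_get_observation_space()` in the same version.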

gym_examples-3.0.136.dist-info/METADATA → gym_examples-3.0.138.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.136
+Version: 3.0.138
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo

gym_examples-3.0.138.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=MC6br6iarAQVDkHZ_fHL4cow2TXsDQ3El5JDyD-SMs0,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=baMc1CiZz4NgtfUQ8LJW3057TCL9dPnuNEsreHMsSzs,21517
+gym_examples-3.0.138.dist-info/METADATA,sha256=SogLjiQFZS1A_aJzr_gKY5piRBolr7FAxebEp_wKuds,412
+gym_examples-3.0.138.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.138.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.138.dist-info/RECORD,,

gym_examples-3.0.136.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=xvHRv3fRlCpsfAE5OWZyelEhTTW1ttWxeyhdR2wnYzI,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=h9SNs1zmilOorshyy9oK80xC4dwib8uC_UVhNoN_6XE,23947
-gym_examples-3.0.136.dist-info/METADATA,sha256=xqB0l3vTR9XjCEnHm4dX_gxBnVaK7nD1ozEz6kNjMpc,412
-gym_examples-3.0.136.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.136.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.136.dist-info/RECORD,,