gym-examples 3.0.285__py3-none-any.whl → 3.0.287__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -5,4 +5,4 @@ register(
      entry_point="gym_examples.envs:WSNRoutingEnv",
  )

- __version__ = "3.0.285"
+ __version__ = "3.0.287"
gym_examples/envs/wsn_env.py CHANGED
@@ -81,10 +81,7 @@ class WSNRoutingEnv(gym.Env):
  )

  # self.action_space = Tuple(tuple([Discrete(self.n_sensors + 1)] * self.n_agents))
- # self.action_space = MultiDiscrete([self.n_sensors + 1] * self.n_agents)
- # self.action_space = MultiDiscrete([self.n_agents, self.n_sensors + 1])
- # self.action_space = Discrete(self.n_sensors + 1) # +1 for the base station
- self.action_space = Discrete((self.n_sensors + 1)**self.n_agents)
+ self.action_space = MultiDiscrete([self.n_sensors + 1] * self.n_agents)

  self.reset()

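The hunk above replaces the single flattened Discrete action space with a MultiDiscrete one. As a minimal sketch (not part of the package; the sizes are illustrative), the difference in what the two spaces hand back when sampled:

from gym.spaces import Discrete, MultiDiscrete

n_agents, n_sensors = 3, 4  # illustrative sizes, not taken from the package

flat = Discrete((n_sensors + 1) ** n_agents)       # old: one scalar encodes every agent's choice
joint = MultiDiscrete([n_sensors + 1] * n_agents)  # new: one entry per agent

print(flat.sample())   # e.g. 87 -> still has to be decoded into per-agent targets
print(joint.sample())  # e.g. [2 0 4] -> already one next-hop index per agent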
@@ -120,7 +117,6 @@ class WSNRoutingEnv(gym.Env):
  self.steps += 1
  rewards = [-max_reward] * self.n_sensors
  dones = [False] * self.n_sensors
- actions = self.to_base_n(actions, self.n_sensors + 1)
  for i, action in enumerate(actions):
      if self.remaining_energy[i] <= 0 or self.number_of_packets[i] <= 0:
          continue # Skip if sensor has no energy left or no packets to transmit
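With MultiDiscrete actions already arriving as one index per agent, the base-(n_sensors + 1) decoding step removed above is no longer needed. The diff does not show to_base_n itself; the following is a hypothetical stand-in with a made-up signature, only to illustrate what such a decoder does:

def to_base_n(action, base, n_agents):
    # Decode a flat Discrete action into one digit per agent (least significant first).
    digits = []
    for _ in range(n_agents):
        digits.append(action % base)
        action //= base
    return digits

print(to_base_n(87, 5, 3))  # [2, 2, 3], since 87 = 3*25 + 2*5 + 2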
@@ -195,11 +191,12 @@ class WSNRoutingEnv(gym.Env):

  rewards = [reward.item() if isinstance(reward, torch.Tensor) else reward for reward in rewards] # Convert the reward to a float
  rewards = np.mean(rewards) # Average the rewards
-
+
  for i in range(self.n_sensors):
      if not dones[i]:
          dones[i] = self.remaining_energy[i] <= 0 or self.number_of_packets[i] == 0
  dones = np.all(dones)
+ print(f"done: {dones}, and type: {type(dones)}")

  return self._get_obs(), rewards, dones, self.get_metrics()

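The hunk above keeps the reward averaging, collapses the per-sensor done flags with np.all, and adds a debug print of the result. A minimal sketch with made-up per-sensor values, showing that np.all yields a single numpy.bool_, which is what the new print statement reports:

import numpy as np
import torch

rewards = [torch.tensor(1.0), 0.5, torch.tensor(0.25)]  # illustrative per-sensor rewards
rewards = [r.item() if isinstance(r, torch.Tensor) else r for r in rewards]
rewards = np.mean(rewards)        # scalar reward, here 0.5833...

dones = [True, True, True]        # illustrative per-sensor flags
dones = np.all(dones)             # numpy.bool_(True)
print(f"done: {dones}, and type: {type(dones)}")  # done: True, and type: <class 'numpy.bool_'>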
gym_examples-{3.0.285 → 3.0.287}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 3.0.285
+ Version: 3.0.287
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
gym_examples-{3.0.285 → 3.0.287}.dist-info/RECORD RENAMED
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=_zsZY7FYGbXB0Ei4w6ldnJhp56xvi7MFMIUqsPtm_ZA,166
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=Mv_1wuAt-ancx-BfkCCVXjBssaENSliEU8jgypys_2c,23350
+ gym_examples-3.0.287.dist-info/METADATA,sha256=283fW25USJEPBYXfleS0cRs0F7bFial5AjjSzdYC05M,412
+ gym_examples-3.0.287.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-3.0.287.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-3.0.287.dist-info/RECORD,,
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=RG4x72mbBXJgNSIrzig4Fq6xeAVsPtwrVaN8fHLoVHc,166
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=7AKFR_Q4I_GADV3Q78g-JYacUSIwRDzjxLRZ5X9lzFc,23610
- gym_examples-3.0.285.dist-info/METADATA,sha256=fR_FUKKdoAUxYEjQ8KbeVC4quinNkfxfqShtSaWFMlo,412
- gym_examples-3.0.285.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-3.0.285.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-3.0.285.dist-info/RECORD,,