gym-examples 3.0.29__py3-none-any.whl → 3.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.29"
+__version__ = "3.0.31"
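For context, the register() call shown above is what makes the environment constructible through gym.make once the package is imported. A minimal usage sketch, not taken from the diff (the environment id passed to register() sits outside this hunk, so the id below is a placeholder):

import gym
import gym_examples  # importing the package runs the register() call shown above

# "gym_examples/WSNRoutingEnv-v0" is a hypothetical id; use whatever id register() actually declares
env = gym.make("gym_examples/WSNRoutingEnv-v0")
obs = env.reset()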
gym_examples/envs/wsn_env.py CHANGED
@@ -69,24 +69,7 @@ class WSNRoutingEnv(gym.Env):
 
 
     def reset(self):
-        # Save the metrics to a CSV file
-        if self.episode_count > 0:
-            with open('metrics.csv', mode='a') as file:
-                writer = csv.writer(file)
-                if self.episode_count == 1:
-                    writer.writerow(['Episode', 'Network throughput', 'Energy efficiency', 'Packet delivery ratio', 'Network lifetime', 'Average latency', 'First node dead time'])
-                writer.writerow([self.episode_count - 1, self.network_throughput, self.energy_efficiency, self.packet_delivery_ratio, self.network_lifetime, self.average_latency, self.first_node_dead_time])
-
-        if self.episode_count % 50 == 0 and self.episode_count > 0:
-            print(f"\n========================================")
-            print(f"Episode {self.episode_count} with previous number of steps = {self.steps}")
-            print(f"Network throughput = {self.network_throughput}")
-            print(f"Energy efficiency = {self.energy_efficiency}")
-            print(f"Packet delivery ratio = {self.packet_delivery_ratio}")
-            print(f"Network lifetime = {self.network_lifetime}")
-            print(f"Average latency = {self.average_latency}")
-            print(f"First node dead time = {self.first_node_dead_time}")
-            print(f"========================================\n")
+
         self.sensor_positions = np.random.rand(self.n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
         self.distance_to_base = np.linalg.norm(self.sensor_positions - base_station_position, axis=1)
         self.remaining_energy = np.ones(self.n_sensors) * initial_energy
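The net effect of this hunk: 3.0.31 drops the per-episode CSV dump and the every-50-episodes console report from reset(), apparently in favour of the get_metrics() call added to step() in the next hunk. For anyone who still wants the old metrics.csv output, a minimal external sketch, assuming the environment keeps exposing the metric attributes referenced in the removed code (network_throughput, energy_efficiency, packet_delivery_ratio, network_lifetime, average_latency, first_node_dead_time):

import csv
import gym

class MetricsCSVLogger(gym.Wrapper):
    # Hypothetical wrapper, not part of the package: re-creates the CSV logging
    # that reset() used to do, but outside the environment itself.
    FIELDS = ['Episode', 'Network throughput', 'Energy efficiency', 'Packet delivery ratio',
              'Network lifetime', 'Average latency', 'First node dead time']

    def __init__(self, env, path='metrics.csv'):
        super().__init__(env)
        self.path = path
        self.episode_count = 0

    def reset(self, **kwargs):
        if self.episode_count > 0:
            with open(self.path, mode='a', newline='') as file:
                writer = csv.writer(file)
                if self.episode_count == 1:
                    writer.writerow(self.FIELDS)
                # read the metric attributes straight off the wrapped env
                writer.writerow([self.episode_count - 1,
                                 self.env.network_throughput, self.env.energy_efficiency,
                                 self.env.packet_delivery_ratio, self.env.network_lifetime,
                                 self.env.average_latency, self.env.first_node_dead_time])
        self.episode_count += 1
        return self.env.reset(**kwargs)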
@@ -193,6 +176,8 @@ class WSNRoutingEnv(gym.Env):
         if self.first_node_dead_time is None and np.any(self.remaining_energy <= 0):
             self.first_node_dead_time = self.steps
 
+        self.get_metrics()
+
         return self._get_obs(), rewards, dones, {}
 
     def _get_obs(self):
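The body of get_metrics() is not part of this diff, so only the call site is visible. Purely as a hypothetical sketch of what such a helper could look like, built from the attribute names used by the logging code removed in reset() (the real method may compute or record these metrics differently):

    def get_metrics(self):
        # Hypothetical sketch only -- the real implementation is not shown in this diff.
        # Records a snapshot of the per-step metrics referenced by the logging code
        # that was removed from reset(); self.metrics is an invented attribute name.
        self.metrics = {
            'network_throughput': self.network_throughput,
            'energy_efficiency': self.energy_efficiency,
            'packet_delivery_ratio': self.packet_delivery_ratio,
            'network_lifetime': self.network_lifetime,
            'average_latency': self.average_latency,
            'first_node_dead_time': self.first_node_dead_time,
        }
        return self.metrics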
{gym_examples-3.0.29.dist-info → gym_examples-3.0.31.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.29
+Version: 3.0.31
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.31.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=augD2S6JxvSYGCFGUz5j2KohuhHRrqFqekAH7LtUdZ4,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=HDLGNMvbhGuweGA0IcJkhw1VuzdSiMTN0Ip-6rdvXUQ,18132
+gym_examples-3.0.31.dist-info/METADATA,sha256=IQDb4-6MmoLEdaGk9c2paIu5VrOp31WLclbjjW9M2mM,411
+gym_examples-3.0.31.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.31.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.31.dist-info/RECORD,,
gym_examples-3.0.29.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=kjxQh1V3vNafWtrGjEAWspS0vT9jGH8kmGmDnPcUxpk,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=oFnj-yRCNBywQGr8hJ06KJ5JTmhb-upzX3B3_CwlAUw,19422
-gym_examples-3.0.29.dist-info/METADATA,sha256=YPpF8dCRhKJRhsnbBv5Bl8UY037kVd3PFXxapuBmf6M,411
-gym_examples-3.0.29.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.29.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.29.dist-info/RECORD,,