gym-examples 3.0.341-py3-none-any.whl → 3.0.343-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -5,4 +5,4 @@ register(
     entry_point="gym_examples.envs:WSNRoutingEnv",
 )
 
-__version__ = "3.0.341"
+__version__ = "3.0.343"
gym_examples/envs/wsn_env.py CHANGED
@@ -57,8 +57,7 @@ class WSNRoutingEnv(gym.Env):
         # Create filenames to save statistics for evaluation
         self.statistics_filename = "results/data/Statistics_filename_" + os.getenv('ALGO_NAME') + ".txt"
         with open(self.statistics_filename, 'w') as file:
-            file.write("Episode return, Total of energy consumption, Std of remaining energy, Mean of remaining energy, \
-                        Average latency, Network throughput, Packet delivery ratio, Network lifetime\n")
+            file.write("Episode return, Total of energy consumption, Std of remaining energy, Mean of remaining energy, Network lifetime\n")
         # Initialize list of episode metrics
         self.num_timesteps = num_timesteps # This argument is for the PPO algorithm
         self.version = version # This argument is for the PPO algorithm
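The new header carries the same five columns as the row appended later in reset(). A minimal sketch of one way to keep the two in sync, assuming a hypothetical pair of helpers outside the package (the STAT_COLUMNS list, function names, and demo path are illustrative only):

```python
import os

# Minimal sketch, not the package's code: keep the statistics schema in one
# list so the header and every appended row always carry the same columns.
STAT_COLUMNS = [
    "Episode return",
    "Total of energy consumption",
    "Std of remaining energy",
    "Mean of remaining energy",
    "Network lifetime",
]

def init_statistics_file(path):
    """Create the file and write the ", "-separated header once."""
    if os.path.dirname(path):
        os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w") as f:
        f.write(", ".join(STAT_COLUMNS) + "\n")

def append_statistics_row(path, values):
    """Append one episode's metrics, in the same order as STAT_COLUMNS."""
    assert len(values) == len(STAT_COLUMNS), "row/column count mismatch"
    with open(path, "a") as f:
        f.write(", ".join(str(v) for v in values) + "\n")

init_statistics_file("Statistics_filename_DEMO.txt")
append_statistics_row("Statistics_filename_DEMO.txt", [12.5, 3.8, 0.09, 0.56, 30000])
```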
@@ -90,7 +89,7 @@ class WSNRoutingEnv(gym.Env):
         self.reset()
 
     def reset(self):
-        if self.number_of_steps > 3: # Change 30000 to a parameter taken from args or kwargs
+        if self.number_of_steps > 30000: # Change 30000 to a parameter taken from args or kwargs
             self.episode_returns.append(self.episode_return)
             observations = self._get_obs()
             remaining_energy_values = np.array([sensor['remaining_energy'] for sensor in observations])
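The inline comment above still asks for the 30000 threshold to be taken from args or kwargs. A minimal sketch of that refactor, assuming a hypothetical keyword argument named `log_after_steps` (the name, default, and the stripped-down class are illustrative, not the released code):

```python
# Hypothetical sketch of parameterising the hard-coded 30000 threshold.
class WSNRoutingEnvSketch:
    def __init__(self, *args, log_after_steps: int = 30000, **kwargs):
        self.log_after_steps = log_after_steps  # replaces the literal in reset()
        self.number_of_steps = 0
        self.episode_return = 0
        self.episode_returns = []

    def reset(self):
        # Same guard as in the diff, driven by the stored parameter instead
        # of the literal 30000.
        if self.number_of_steps > self.log_after_steps:
            self.episode_returns.append(self.episode_return)
        self.episode_return = 0


env = WSNRoutingEnvSketch(log_after_steps=30000)
env.reset()
```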
@@ -101,18 +100,14 @@ class WSNRoutingEnv(gym.Env):
 
             # Append the statistics to the .txt file
             with open(self.statistics_filename, 'a') as file:
-                file.write(f"{self.episode_return}, {total_consumption_energy}, {std_remaining_energy}, {mean_remaining_energy}\
-                             {self.average_latency}, {self.network_throughput}, {self.packet_delivery_ratio}, {self.network_lifetime}\n")
-
-            print("Episode count: ", self.episode_count)
-            print("Episode return: ", self.episode_return)
-            print("Total consumption energy: ", total_consumption_energy)
-            print("Std remaining energy: ", std_remaining_energy)
-            print("Mean remaining energy: ", mean_remaining_energy)
-            print("Average latency: ", self.average_latency)
-            print("Network throughput: ", self.network_throughput)
-            print("Packet delivery ratio: ", self.packet_delivery_ratio)
-            print("Network lifetime: ", self.network_lifetime)
+                file.write(f"{self.episode_return}, {total_consumption_energy}, {std_remaining_energy}, {mean_remaining_energy}, {self.network_lifetime}\n")
+
+            # print("Episode count: ", self.episode_count)
+            # print("Episode return: ", self.episode_return)
+            # print("Total consumption energy: ", total_consumption_energy)
+            # print("Std remaining energy: ", std_remaining_energy)
+            # print("Mean remaining energy: ", mean_remaining_energy)
+            # print("Network lifetime: ", self.network_lifetime)
 
         self.episode_return = 0
         self.sensor_positions = np.random.rand(self.n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
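With the header and the appended row both trimmed to the same five fields, the statistics file parses cleanly as CSV. A self-contained sketch of reading it back for evaluation; the sample rows and the pandas usage are illustrative and not shipped with the package:

```python
import io

import pandas as pd

# Illustration only: two fabricated rows in the five-column format the
# environment now writes; skipinitialspace handles the ", " separators.
sample = io.StringIO(
    "Episode return, Total of energy consumption, Std of remaining energy, "
    "Mean of remaining energy, Network lifetime\n"
    "12.5, 3.8, 0.09, 0.56, 30000\n"
    "14.1, 3.6, 0.07, 0.58, 31250\n"
)
df = pd.read_csv(sample, skipinitialspace=True)
print(df.mean())  # average of each metric across the sampled episodes
```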
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.341
+Version: 3.0.343
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=MVmgLyfSlzf6qykQwYdJPUldhySFwg-1oga6QkmSbzE,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=eUT4sHZe51N_G_fgw1xfgzV34dbelhoHraGC1sHcM_M,25154
+gym_examples-3.0.343.dist-info/METADATA,sha256=6E0hGBlmv9LzaKtMXvwbMYz2y6V4Ig_mhnFMaFKYBGA,412
+gym_examples-3.0.343.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.343.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.343.dist-info/RECORD,,
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=qnnstwAvKU6raAtb0d7la0grQkkF0sZn9a7-R7Cfu4M,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=nmL2L6Fyzh6bAeqFJSNYITZDqFnQpBQqCKA5pItpR-w,25549
-gym_examples-3.0.341.dist-info/METADATA,sha256=OIVfNLG2pq7Gjz6AYRcf3yWDM3gFdayY9GjcUiQnLRg,412
-gym_examples-3.0.341.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.341.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.341.dist-info/RECORD,,