gym-examples 3.0.181__py3-none-any.whl → 3.0.182__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.181"
+__version__ = "3.0.182"
gym_examples/envs/wsn_env.py CHANGED
@@ -52,11 +52,13 @@ class WSNRoutingEnv(gym.Env):
 
     print_stats = False # Global flag to control printing of statistics
 
-    def __init__(self, n_sensors = 20, coverage_radius=(upper_bound - lower_bound)/4):
+    def __init__(self, n_sensors = 20, coverage_radius=(upper_bound - lower_bound)/4, num_timesteps = None, version = None):
 
         super(WSNRoutingEnv, self).__init__()
 
         # Initialize list of episode metrics
+        self.num_timesteps = num_timesteps # This argument is for the PPO algorithm
+        self.version = version # This argument is for the PPO algorithm
         self.number_of_steps = 0 # Total number of steps taken by the agent since the beginning of the training
         self.episode_returns = []
         self.episode_std_remaining_energy = []
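With this change the PPO step budget and the run identifier are passed to the environment at construction time instead of being read from globals. A minimal usage sketch follows; only the module path, class name, and keyword names come from this diff, while the concrete values are hypothetical:

from gym_examples.envs.wsn_env import WSNRoutingEnv

# Hypothetical values; only the parameter names are taken from the new signature.
env = WSNRoutingEnv(
    n_sensors=20,
    num_timesteps=200_000,  # assumed PPO training budget used as the metric-dump threshold
    version="v182",         # assumed run tag, interpolated into the saved .npy file names
)
obs = env.reset()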
@@ -212,7 +214,7 @@ class WSNRoutingEnv(gym.Env):
         if os.getenv('PRINT_STATS') == 'True': # We are trying to extract only the statistics for the PPO algorithm
             self.number_of_steps += 1
             self.episode_return += rewards
-            if self.number_of_steps >= args.num_timesteps:
+            if self.number_of_steps >= self.num_timesteps:
                 self.episode_returns.append(self.episode_return)
                 self.episode_std_remaining_energy.append(np.std(self.remaining_energy))
                 self.episode_mean_remaining_energy.append(np.mean(self.remaining_energy))
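Note that the new constructor defaults num_timesteps to None, so the comparison self.number_of_steps >= self.num_timesteps will raise a TypeError under Python 3 if PRINT_STATS is enabled but no value was supplied. A None-safe variant is sketched below; the guard is an assumption about how a caller might harden this path, not code from the package:

# Sketch of a None-safe threshold check (attribute names from the diff, the guard itself is hypothetical).
if self.num_timesteps is not None and self.number_of_steps >= self.num_timesteps:
    self.episode_returns.append(self.episode_return)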
@@ -222,14 +224,14 @@ class WSNRoutingEnv(gym.Env):
                 self.episode_network_lifetime.append(self.network_lifetime)
                 self.episode_average_latency.append(self.average_latency)
 
-                np.save(f"{base_back_up_dir}returns_QMIX_{version}.npy", np.array(self.episode_returns))
-                np.save(f"{base_back_up_dir}std_remaining_energy_QMIX_{version}.npy", np.array(self.episode_std_remaining_energy))
-                np.save(f"{base_back_up_dir}total_consumption_energy_QMIX_{version}.npy", np.array(self.episode_total_consumption_energy))
-                np.save(f"{base_back_up_dir}mean_remaining_energy_QMIX_{version}.npy", np.array(self.episode_mean_remaining_energy))
-                np.save(f"{base_back_up_dir}network_throughput_QMIX_{version}.npy", np.array(self.episode_network_throughput))
-                np.save(f"{base_back_up_dir}packet_delivery_ratio_QMIX_{version}.npy", np.array(self.episode_packet_delivery_ratio))
-                np.save(f"{base_back_up_dir}network_lifetime_QMIX_{version}.npy", np.array(self.episode_network_lifetime))
-                np.save(f"{base_back_up_dir}average_latency_QMIX_{version}.npy", np.array(self.episode_average_latency))
+                np.save(f"{base_back_up_dir}returns_QMIX_{self.version}.npy", np.array(self.episode_returns))
+                np.save(f"{base_back_up_dir}std_remaining_energy_QMIX_{self.version}.npy", np.array(self.episode_std_remaining_energy))
+                np.save(f"{base_back_up_dir}total_consumption_energy_QMIX_{self.version}.npy", np.array(self.episode_total_consumption_energy))
+                np.save(f"{base_back_up_dir}mean_remaining_energy_QMIX_{self.version}.npy", np.array(self.episode_mean_remaining_energy))
+                np.save(f"{base_back_up_dir}network_throughput_QMIX_{self.version}.npy", np.array(self.episode_network_throughput))
+                np.save(f"{base_back_up_dir}packet_delivery_ratio_QMIX_{self.version}.npy", np.array(self.episode_packet_delivery_ratio))
+                np.save(f"{base_back_up_dir}network_lifetime_QMIX_{self.version}.npy", np.array(self.episode_network_lifetime))
+                np.save(f"{base_back_up_dir}average_latency_QMIX_{self.version}.npy", np.array(self.episode_average_latency))
 
         return self._get_obs(), rewards, dones, {}
 
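Each metric series is now dumped as a NumPy array whose file name embeds the run tag passed as version. A small analysis sketch follows; the directory and tag values are hypothetical, and only the <metric>_QMIX_<version>.npy naming pattern comes from the diff:

import numpy as np

base_back_up_dir = "./backups/"  # assumed value of base_back_up_dir
version = "v182"                 # assumed run tag passed to the constructor

returns = np.load(f"{base_back_up_dir}returns_QMIX_{version}.npy")
latency = np.load(f"{base_back_up_dir}average_latency_QMIX_{version}.npy")
print(f"mean return: {returns.mean():.2f}, mean latency: {latency.mean():.2f}")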
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.181
+Version: 3.0.182
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=RiS3AduI_PATwPjqyEMvdXeVB7NUbBrYUWBJchVfwuw,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=vJ94e6nzg1iSxSyfEYffTjhN7szbIz1ZaqU4tG_U0_Y,25901
+gym_examples-3.0.182.dist-info/METADATA,sha256=R7BDOPdQSi9IwPOQGfq9a8uqNBl0hYvwMHjJD2l-Jo8,412
+gym_examples-3.0.182.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.182.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.182.dist-info/RECORD,,
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=iQVGN4xf7aVsGStExIGCellet65qyWdATF9mMBZUd38,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=xPQBioLxNacHCoD9C1r6q5kyT0SUsGKQ4QIunrLsJfY,25665
-gym_examples-3.0.181.dist-info/METADATA,sha256=OP7sQV_EnNvX-bokFyrnW_YPblQcblvOv9DyfDY841Y,412
-gym_examples-3.0.181.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.181.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.181.dist-info/RECORD,,