gym-examples 3.0.221__py3-none-any.whl → 3.0.223__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.221"
+__version__ = "3.0.223"
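The registration above caps each episode at 50 steps via Gym's TimeLimit wrapper, and importing the package runs the register(...) call. A minimal usage sketch under that assumption; the registered id string does not appear in this diff, so "gym_examples/WSNRoutingEnv-v0" below is a hypothetical placeholder:

import gym
import gym_examples  # importing the package executes the register(...) call above

# NOTE: the id below is hypothetical; this diff does not show the registered name.
env = gym.make("gym_examples/WSNRoutingEnv-v0")  # TimeLimit enforces max_episode_steps=50
obs = env.reset()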
gym_examples/envs/wsn_env.py CHANGED
@@ -11,7 +11,7 @@ import torch.nn.functional as F
 import os
 
 # Define the network parameters for the final reward function
-input_dim = 7 # length of the individual rewards vector
+input_dim = 2 # length of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
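Dropping input_dim from 7 to 2 means the final-reward network now aggregates only a two-element reward vector into one scalar (see the matching change to the returned reward vector further down in this diff). A minimal sketch of such a head, assuming a single linear layer; the class name RewardNet and the amplifier constant Eamp are assumptions, since neither appears in this diff:

import torch
import torch.nn as nn

input_dim = 2   # length of the individual rewards vector
output_dim = 1  # final scalar reward

class RewardNet(nn.Module):
    # Hypothetical aggregator; wsn_env.py's actual architecture is not shown here.
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, rewards_vec: torch.Tensor) -> torch.Tensor:
        return self.fc(rewards_vec)

Eelec = 50e-9   # energy per bit spent in transmitter/receiver electronics
Eamp = 100e-12  # assumed amplifier energy per bit per m^2 (first-order radio model)

def transmission_energy(num_bits: int, distance_m: float) -> float:
    # Standard first-order radio model: E_tx(k, d) = Eelec*k + Eamp*k*d^2
    return Eelec * num_bits + Eamp * num_bits * distance_m ** 2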
@@ -218,32 +218,32 @@ class WSNRoutingEnv(gym.Env):
         # print(f"Step: {self.steps}, Rewards: {rewards}, Done: {dones}")
         dones = all(dones) # Done if all agents are done
 
-        # if os.getenv('PRINT_STATS') == 'True': # We are trying to extract only the statistics for the PPO algorithm
-        #     self.number_of_steps += 1
-        #     self.episode_return += rewards
-        #     if self.number_of_steps >= self.num_timesteps:
-        #         self.episode_returns.append(self.episode_return)
-        #         self.episode_std_remaining_energy.append(np.std(self.remaining_energy))
-        #         self.episode_mean_remaining_energy.append(np.mean(self.remaining_energy))
-        #         self.episode_total_consumption_energy.append(np.sum(initial_energy - self.remaining_energy))
-        #         self.episode_network_throughput.append(self.network_throughput)
-        #         self.episode_packet_delivery_ratio.append(self.packet_delivery_ratio)
-        #         self.episode_network_lifetime.append(self.network_lifetime)
-        #         self.episode_average_latency.append(self.average_latency)
-
-        #         metrics = {
-        #             "returns_PPO": self.episode_returns,
-        #             "std_remaining_energy_PPO": self.episode_std_remaining_energy,
-        #             "total_consumption_energy_PPO": self.episode_total_consumption_energy,
-        #             "mean_remaining_energy_PPO": self.episode_mean_remaining_energy,
-        #             "network_throughput_PPO": self.episode_network_throughput,
-        #             "packet_delivery_ratio_PPO": self.episode_packet_delivery_ratio,
-        #             "network_lifetime_PPO": self.episode_network_lifetime,
-        #             "average_latency_PPO": self.episode_average_latency
-        #         }
-
-        #         for metric_name, metric_value in metrics.items():
-        #             np.save(f"{base_back_up_dir}{metric_name}_{self.version}.npy", np.array(metric_value))
+        if os.getenv('PRINT_STATS') == 'True': # We are trying to extract only the statistics for the PPO algorithm
+            self.number_of_steps += 1
+            self.episode_return += rewards
+            if self.number_of_steps >= self.num_timesteps:
+                self.episode_returns.append(self.episode_return)
+                self.episode_std_remaining_energy.append(np.std(self.remaining_energy))
+                self.episode_mean_remaining_energy.append(np.mean(self.remaining_energy))
+                self.episode_total_consumption_energy.append(np.sum(initial_energy - self.remaining_energy))
+                self.episode_network_throughput.append(self.network_throughput)
+                self.episode_packet_delivery_ratio.append(self.packet_delivery_ratio)
+                self.episode_network_lifetime.append(self.network_lifetime)
+                self.episode_average_latency.append(self.average_latency)
+
+                metrics = {
+                    "returns_PPO": self.episode_returns,
+                    "std_remaining_energy_PPO": self.episode_std_remaining_energy,
+                    "total_consumption_energy_PPO": self.episode_total_consumption_energy,
+                    "mean_remaining_energy_PPO": self.episode_mean_remaining_energy,
+                    "network_throughput_PPO": self.episode_network_throughput,
+                    "packet_delivery_ratio_PPO": self.episode_packet_delivery_ratio,
+                    "network_lifetime_PPO": self.episode_network_lifetime,
+                    "average_latency_PPO": self.episode_average_latency
+                }
+
+                for metric_name, metric_value in metrics.items():
+                    np.save(f"{base_back_up_dir}{metric_name}_{self.version}.npy", np.array(metric_value))
 
         return self._get_obs(), rewards, dones, {}
 
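Re-enabling this block makes the statistics export opt-in: nothing is collected unless the PRINT_STATS environment variable is exactly the string 'True'. A minimal way for a training script to opt in:

import os

# The env compares against the literal string 'True' on each step,
# so values like 'true' or '1' leave statistics collection disabled.
os.environ["PRINT_STATS"] = "True"

Note that base_back_up_dir, self.num_timesteps, and initial_energy are referenced in this block but defined elsewhere in wsn_env.py; they do not appear in this diff.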
@@ -386,7 +386,8 @@ class WSNRoutingEnv(gym.Env):
 
         rewards_performance = np.array([reward_latency, reward_network_throughput, reward_packet_delivery_ratio])
 
-        return np.concatenate((rewards_energy, rewards_performance))
+        # return np.concatenate((rewards_energy, rewards_performance))
+        return np.array([reward_consumption_energy, reward_dispersion_remaining_energy])
 
 
     def compute_network_rewards(self):
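The new return value keeps only the two energy terms, which is what drives the input_dim = 2 change at the top of the file. The exact formulas for reward_consumption_energy and reward_dispersion_remaining_energy are not visible in this diff; a hedged sketch of one common way to define such terms, with the normalization entirely assumed:

import numpy as np

def energy_rewards(remaining_energy: np.ndarray, initial_energy: float) -> np.ndarray:
    # Hypothetical definitions; wsn_env.py's real formulas are not shown in this diff.
    # Higher reward when less total energy has been consumed...
    total_consumed = np.sum(initial_energy - remaining_energy)
    reward_consumption_energy = 1.0 - total_consumed / (initial_energy * remaining_energy.size)
    # ...and when the remaining energy is evenly spread across sensor nodes.
    dispersion = np.std(remaining_energy) / (np.mean(remaining_energy) + 1e-12)
    reward_dispersion_remaining_energy = 1.0 - dispersion
    return np.array([reward_consumption_energy, reward_dispersion_remaining_energy])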
gym_examples-3.0.221.dist-info/METADATA → gym_examples-3.0.223.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.221
+Version: 3.0.223
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.223.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=-G7Exc_e328of06oP-Qigg3zfNL5kL7JYUYFdCDaTjk,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=HwisiCG_Q6xWufQnA-M_UjhLJdrPGAjkgV-C--84J90,26298
+gym_examples-3.0.223.dist-info/METADATA,sha256=r2QkgGL6NxGTzUsPTyYBbsMnzxvZbr6nG_czFNvRUYQ,412
+gym_examples-3.0.223.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.223.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.223.dist-info/RECORD,,
gym_examples-3.0.221.dist-info/RECORD REMOVED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=e3IzKZG7FDVeQeid5naFn1PzaMlIKk94FSEIIRc4DOk,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=9QXOPbuHBEAA4X24dYgYRWQdayupNxTv_Pmyh6PzmRk,26254
-gym_examples-3.0.221.dist-info/METADATA,sha256=geg1ENQ3KH6mdK_BF8opzQPDwil7KcCp-OXBsbrIP-8,412
-gym_examples-3.0.221.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.221.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.221.dist-info/RECORD,,
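For reference, each RECORD row has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with '=' padding stripped (the standard wheel RECORD format). Between these versions, __init__.py, wsn_env.py, and METADATA carry new hashes, while envs/__init__.py, WHEEL, and top_level.txt are byte-identical. A small sketch for checking an entry:

import base64
import hashlib

def record_digest(path: str) -> str:
    # urlsafe base64 of the raw SHA-256 digest, '=' padding stripped (PEP 376 style)
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Against the unpacked 3.0.223 wheel this should return
# 'HwisiCG_Q6xWufQnA-M_UjhLJdrPGAjkgV-C--84J90' for gym_examples/envs/wsn_env.py.
print(record_digest("gym_examples/envs/wsn_env.py"))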