gym-examples 3.0.40-py3-none-any.whl → 3.0.42-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.40"
+__version__ = "3.0.42"
gym_examples/envs/wsn_env.py CHANGED
@@ -10,7 +10,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 # Define the network parameters for the final reward function
-input_dim = 6 # lenght of the individual rewards vector
+input_dim = 4 # lenght of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
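The input_dim bump from 6 to 4 narrows the individual-rewards vector that feeds the final reward function, but the module consuming these parameters is not shown in the hunk. A minimal sketch of what such an aggregator could look like, assuming a plain torch.nn.Linear head; the class name FinalRewardNet and its structure are illustrative, not taken from the package:

import torch
import torch.nn as nn

# Parameters defined at the top of wsn_env.py
input_dim = 4    # length of the individual rewards vector
output_dim = 1   # final reward

class FinalRewardNet(nn.Module):
    # Hypothetical aggregator: maps the per-criterion reward vector to one scalar.
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, rewards: torch.Tensor) -> torch.Tensor:
        # rewards: shape (input_dim,) -> scalar final reward
        return self.fc(rewards)

For context, Eelec = 50e-9 J/bit is the electronics constant of the standard first-order radio model for WSNs, in which transmitting k bits over distance d costs roughly E_tx(k, d) = Eelec·k + Eamp·k·d^2.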
@@ -153,8 +153,8 @@ class WSNRoutingEnv(gym.Env):
                     self.number_of_packets[action] += self.number_of_packets[i]
                     self.number_of_packets[i] = 0 # Reset the number of packets of the sensor i
                 # Calculate final reward
-                # rewards[i] = self.compute_attention_rewards(rewards[i])
-                rewards[i] = self.compute_sum_rewards(rewards[i])
+                rewards[i] = self.compute_attention_rewards(rewards[i])
+                # rewards[i] = self.compute_sum_rewards(rewards[i])
         for i in range(self.n_sensors):
             if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
                 dones[i] = True
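This hunk swaps the aggregation call from compute_sum_rewards to compute_attention_rewards, but neither method body appears in the diff. A hedged sketch of the two, assuming the attention variant softmax-weights each reward component through a small learned layer; self.attention as an nn.Linear is an assumption, not the package's actual code:

import torch
import torch.nn.functional as F

def compute_sum_rewards(self, rewards):
    # Baseline aggregation: the final reward is the plain sum of components.
    return float(torch.as_tensor(rewards, dtype=torch.float32).sum())

def compute_attention_rewards(self, rewards):
    # Assumed attention-style aggregation: learned scores -> softmax weights,
    # then a weighted sum of the individual rewards.
    # self.attention is assumed to be nn.Linear(input_dim, input_dim).
    r = torch.as_tensor(rewards, dtype=torch.float32)
    weights = F.softmax(self.attention(r), dim=-1)
    return float(torch.dot(weights, r))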
@@ -301,7 +301,8 @@ class WSNRoutingEnv(gym.Env):
         reward_dispersion_remaining_energy = self.compute_reward_dispersion_remaining_energy()
         reward_number_of_packets = self.compute_reward_number_of_packets(action)
 
-        return [reward_angle, reward_consumption_energy, reward_dispersion_remaining_energy, reward_number_of_packets]
+        # return [reward_angle, reward_consumption_energy, reward_dispersion_remaining_energy, reward_number_of_packets]
+        return [reward_consumption_energy, reward_dispersion_remaining_energy]
 
     def network_reward_dispersion_remaining_energy(self):
         '''
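Trimming the returned list from four components to two interacts with input_dim = 4 above: the vector handed to the final-reward network has to match the network's input size, and as written the two constants disagree unless the vector is extended elsewhere before aggregation. A small runtime guard one could add at the return site to surface such drift (illustrative, not part of the package):

rewards = [reward_consumption_energy, reward_dispersion_remaining_energy]
# Fail fast if the reward vector and the aggregator input size drift apart.
assert len(rewards) == input_dim, (
    f"reward vector has {len(rewards)} entries but input_dim is {input_dim}"
)
return rewards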
gym_examples-3.0.40.dist-info/METADATA → gym_examples-3.0.42.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.40
+Version: 3.0.42
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.42.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=kLTFJhJe48NKPRenv6nTfwR9FbFVrCMz4hzeIhHTrgI,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=P3-MHflhQ29pLwrDb2oY7NjJvbpDXuTz6bPX2_mbqPQ,19522
+gym_examples-3.0.42.dist-info/METADATA,sha256=T4wmhIiKxlhX7Mccah7D-kCaZap2pgJPtrVgyo0rMPI,411
+gym_examples-3.0.42.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.42.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.42.dist-info/RECORD,,
gym_examples-3.0.40.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=RAJJmbqLaH4xxUvVUvfFFeXvEHl2bo2r_zLkqr9TUAg,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=k5yJgvCEMrizm8ZZOpukLRVcnAMTnYWP_ri8nnG9KYc,19440
-gym_examples-3.0.40.dist-info/METADATA,sha256=bLu5N3z4h-SRmD_CelwNmWuJGT6mcxJ9Hqyhz6aeQ8o,411
-gym_examples-3.0.40.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.40.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.40.dist-info/RECORD,,