gym-examples 2.0.66__py3-none-any.whl → 2.0.68__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
  max_episode_steps=50,
  )
 
- __version__ = "2.0.66"
+ __version__ = "2.0.68"
gym_examples/envs/wsn_env.py CHANGED
@@ -58,6 +58,10 @@ class WSNRoutingEnv(gym.Env):
  self.scale_displacement = 0.01 * (upper_bound - lower_bound) # scale of the random displacement of the sensors
  self.epsilon = 1e-10 # small value to avoid division by zero
  self.rewards_individual = [0] * self.n_sensors
+ # Initialize the position of the sensors randomly
+ self.sensor_positions = np.random.rand(self.n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
+ self.distance_to_base = np.linalg.norm(self.sensor_positions - base_station_position, axis=1)
+
 
  # Define observation space
  self.observation_space = Tuple(
@@ -76,9 +80,6 @@ class WSNRoutingEnv(gym.Env):
  print(f"Episode: {self.episode_count}")
  print(f"Rewards: {self.rewards_individual}")
  print("=================================================\n")
- # Initialize the position of the sensors randomly
- self.sensor_positions = np.random.rand(self.n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
- self.distance_to_base = np.linalg.norm(self.sensor_positions - base_station_position, axis=1)
  # Initialize remaining energy of each sensor to initial_energy joule
  self.remaining_energy = np.ones(self.n_sensors) * initial_energy
  self.consumption_energy = np.zeros(self.n_sensors)
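
Taken together, these two hunks move the random sensor placement and the per-sensor distance to the base station earlier in the constructor, before the observation space is built, instead of computing them after the episode debug prints. A minimal standalone sketch of what the two added lines compute, using hypothetical values for n_sensors, the bounds, and base_station_position:

    import numpy as np

    n_sensors = 10                                   # hypothetical sensor count
    lower_bound, upper_bound = 0.0, 100.0            # hypothetical deployment-area bounds
    base_station_position = np.array([50.0, 50.0])   # hypothetical base station location

    # Uniform placement of each sensor in the square [lower_bound, upper_bound]^2
    sensor_positions = np.random.rand(n_sensors, 2) * (upper_bound - lower_bound) + lower_bound

    # Euclidean distance from each sensor to the base station (one value per sensor)
    distance_to_base = np.linalg.norm(sensor_positions - base_station_position, axis=1)

    print(distance_to_base.shape)  # (10,)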
@@ -178,6 +179,11 @@ class WSNRoutingEnv(gym.Env):
 
  def update_sensor_energies(self, i, delta_energy):
  self.consumption_energy[i] += delta_energy
+ if delta_energy > 0:
+ print("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
+ print(f"Sensor {i} consumed {delta_energy} J")
+ print(f"Consumption energy of sensor {i}: {self.consumption_energy[i]} J")
+ print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
  self.remaining_energy[i] -= delta_energy
 
  def transmission_energy(self, number_of_packets, distance):
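
The new block in update_sensor_energies adds a debug trace that fires only when delta_energy is positive, after the consumption counter is incremented and before the remaining energy is decremented. A minimal sketch of that bookkeeping outside the gym.Env class, with a hypothetical initial_energy and sensor count:

    import numpy as np

    initial_energy = 1.0                              # hypothetical initial energy per sensor, in joules
    consumption_energy = np.zeros(3)                  # cumulative consumption per sensor
    remaining_energy = np.ones(3) * initial_energy    # remaining energy per sensor

    def update_sensor_energies(i, delta_energy):
        consumption_energy[i] += delta_energy
        if delta_energy > 0:  # trace only actual consumption, as in the added lines
            print(f"Sensor {i} consumed {delta_energy} J "
                  f"(total so far: {consumption_energy[i]} J)")
        remaining_energy[i] -= delta_energy

    update_sensor_energies(0, 0.05)  # prints the trace
    update_sensor_energies(1, 0.0)   # silent: nothing was consumed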
gym_examples-2.0.68.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 2.0.66
+ Version: 2.0.68
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
gym_examples-2.0.68.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=aYEr7oomKYOI8jIHcAnmgwgyswe945nmt2ypd_YXx3Y,193
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=wAaF2JQ8UmMeA6cvS6u9KNYsqXSDBC8-EMiytlVFPQs,17171
+ gym_examples-2.0.68.dist-info/METADATA,sha256=KbqG62aXF3WmIq9eLYRbmszRqMeyYnY9n3LLnoZ1YoI,411
+ gym_examples-2.0.68.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-2.0.68.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-2.0.68.dist-info/RECORD,,
gym_examples-2.0.66.dist-info/RECORD REMOVED
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=u7b6xxMUojiVFajKmLl9xvGS2XfN1qhUxD0uqgd3RKA,193
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=OHQXnd90YAKd8QwxtPrflaoitx_vk4zKq2Lj_dtNYCA,16817
- gym_examples-2.0.66.dist-info/METADATA,sha256=jHjDwC3qD7iq-EqlzXjY5UmCjwAuZCUcng9FbAzBNkM,411
- gym_examples-2.0.66.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-2.0.66.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-2.0.66.dist-info/RECORD,,