gym-examples 2.0.100__py3-none-any.whl → 2.0.102__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "2.0.100"
+__version__ = "2.0.102"
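
Only the version string changes in this file; the register(...) call above the hunk still caps episodes at 50 steps via Gym's TimeLimit wrapper. A minimal usage sketch, with a hypothetical environment id, since the real id sits above the hunk and is not shown in this diff:

    import gym
    import gym_examples  # importing the package runs the register(...) call

    # "gym_examples/WSNRoutingEnv-v0" is a hypothetical id for illustration;
    # the actual id is defined above the hunk and does not appear in the diff.
    env = gym.make("gym_examples/WSNRoutingEnv-v0")
    obs = env.reset()  # episodes are truncated after max_episode_steps=50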
gym_examples/envs/wsn_env.py CHANGED
@@ -285,33 +285,32 @@ class WSNRoutingEnv(gym.Env):
         reward_dispersion_remaining_energy = self.compute_reward_dispersion_remaining_energy()
         reward_number_of_packets = self.compute_reward_number_of_packets(action)
 
-        # return [reward_angle, reward_distance, reward_consumption_energy, reward_dispersion_remaining_energy, reward_number_of_packets]
+        return [reward_angle, reward_distance, reward_consumption_energy, reward_dispersion_remaining_energy]
         # return [reward_angle, reward_distance, reward_consumption_energy, reward_number_of_packets]
         # return [reward_angle, reward_distance, reward_dispersion_remaining_energy, reward_number_of_packets]
         # return [reward_angle, reward_distance, reward_consumption_energy, reward_dispersion_remaining_energy]
-        return [reward_consumption_energy]
 
-    # def network_reward_dispersion_remaining_energy(self):
-    #     '''
-    #     Compute the reward based on the standard deviation of the remaining energy at the network level
-    #     '''
-    #     dispersion_remaining_energy = np.std(self.remaining_energy)
-    #     # Normalize the standard deviation of the remaining energy
-    #     max_dispersion_remaining_energy = initial_energy / 2 # maximum standard deviation of the remaining energy if n_sensors is even
-    #     normalized_dispersion_remaining_energy = dispersion_remaining_energy / max_dispersion_remaining_energy
+    def network_reward_dispersion_remaining_energy(self):
+        '''
+        Compute the reward based on the standard deviation of the remaining energy at the network level
+        '''
+        dispersion_remaining_energy = np.std(self.remaining_energy)
+        # Normalize the standard deviation of the remaining energy
+        max_dispersion_remaining_energy = initial_energy / 2 # maximum standard deviation of the remaining energy if n_sensors is even
+        normalized_dispersion_remaining_energy = dispersion_remaining_energy / max_dispersion_remaining_energy
 
-    #     return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
+        return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
 
-    # def network_reward_consumption_energy(self):
-    #     '''
-    #     Compute the reward based on the total energy consumption (transmission, reception) at the network level
-    #     '''
-    #     total_energy = self.n_sensors * initial_energy - np.sum(self.remaining_energy)
-    #     # Normalize the total energy consumption
-    #     max_total_energy = self.n_sensors * initial_energy
-    #     normalized_total_energy = total_energy / max_total_energy
+    def network_reward_consumption_energy(self):
+        '''
+        Compute the reward based on the total energy consumption (transmission, reception) at the network level
+        '''
+        total_energy = self.n_sensors * initial_energy - np.sum(self.remaining_energy)
+        # Normalize the total energy consumption
+        max_total_energy = self.n_sensors * initial_energy
+        normalized_total_energy = total_energy / max_total_energy
 
-    #     return np.clip(1 - normalized_total_energy, 0, 1)
+        return np.clip(1 - normalized_total_energy, 0, 1)
 
     def integrate_mobility(self):
         '''
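
The substantive change in this release: compute_reward now returns the four-component reward vector instead of [reward_consumption_energy] alone, and the two network-level reward helpers are uncommented. A minimal standalone sketch of their normalization math (in the env, np is numpy and initial_energy is a module-level constant; here both inputs are passed in as parameters):

    import numpy as np

    def network_rewards(remaining_energy, n_sensors, initial_energy):
        # Dispersion reward: penalize uneven residual energy across sensors.
        # initial_energy / 2 is the worst-case standard deviation (half the
        # sensors full, half empty, n_sensors even), so the ratio is in [0, 1].
        dispersion = np.std(remaining_energy)
        reward_dispersion = np.clip(1 - dispersion / (initial_energy / 2), 0, 1)

        # Consumption reward: penalize total energy drawn from the network.
        consumed = n_sensors * initial_energy - np.sum(remaining_energy)
        reward_consumption = np.clip(1 - consumed / (n_sensors * initial_energy), 0, 1)

        return reward_dispersion, reward_consumption

Both rewards are clipped to [0, 1], with 1 meaning perfectly even residual energy and zero net consumption, respectively.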
{gym_examples-2.0.100.dist-info → gym_examples-2.0.102.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.100
+Version: 2.0.102
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-2.0.102.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=4j4wd2UG-FV0tViFBs-Ekb4ORSaPo-EcPL1zsyIkn60,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=W_BlqrQ4ZS8eyfyKdvgNx2dinCgcYaybGMwjoRcMHFc,17188
+gym_examples-2.0.102.dist-info/METADATA,sha256=nTpULV8tufyIQdQ8ymRIpLrK-c1xMoVgHkebpjUQIw0,412
+gym_examples-2.0.102.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.102.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.102.dist-info/RECORD,,
gym_examples-2.0.100.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=8TBO37px9cuBpbpMXpVbhFfbMLa4Q4gQwr3NFlkWt8E,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=_0l_nttd_BfGQN5cE22nEdfoC9KCedztW43YyjD314s,17296
-gym_examples-2.0.100.dist-info/METADATA,sha256=ZXPIwuuBfMVul90Scs4Je-wwLk_qwAqrOMob3XSRTYA,412
-gym_examples-2.0.100.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.100.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.100.dist-info/RECORD,,
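
Each RECORD row is path,sha256=<digest>,<size in bytes>, where the digest is the file's SHA-256 hash in URL-safe base64 with the padding stripped (per the wheel spec); note wsn_env.py shrinks from 17296 to 17188 bytes, consistent with the code removed above. A small sketch for recomputing an entry locally (record_entry is a hypothetical helper name):

    import base64
    import hashlib

    def record_entry(path):
        # Rebuild the "path,sha256=...,size" RECORD row for one file:
        # SHA-256 digest, URL-safe base64, trailing "=" padding removed.
        data = open(path, "rb").read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"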