gym-examples 2.0.76__py3-none-any.whl → 2.0.78__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
      max_episode_steps=50,
  )

- __version__ = "2.0.76"
+ __version__ = "2.0.78"
gym_examples/envs/wsn_env.py CHANGED
@@ -133,7 +133,7 @@ class WSNRoutingEnv(gym.Env):
  rewards = [0.5 * r + 0.5 * (self.network_reward_consumption_energy() + self.network_reward_dispersion_remaining_energy()) for r in rewards]

  # Only proceed if network consumption energy is not zero to avoid unnecessary list comprehension
- self.rewards_individual = [r for r in self.rewards_individual if ((r != 0) and (r[3] < 1))]
+ self.rewards_individual = [r for r in self.rewards_individual if ((r != 0) and (r[len(r) - 1] < 1))]

  # self.rewards_individual = [{"ind": r, "net_consumption_energy": self.network_reward_consumption_energy(), "net_dispersion_energy": self.network_reward_dispersion_remaining_energy()} for r in self.rewards_individual if ((r != 0) and (self.network_reward_consumption_energy() != 0))]
  for i in range(self.n_sensors):
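The filter above now indexes the last element of each per-step reward list instead of fixed position 3, which keeps the threshold check valid after the reward vector loses a component (see the next hunk). A minimal sketch of the effect, using made-up reward lists rather than the environment's actual values:

# Hypothetical per-step reward lists; only the length and last element matter here.
rewards_individual = [
    [0.2, 0.4, 0.9],   # last element < 1  -> kept
    [0.1, 0.3, 1.0],   # last element >= 1 -> dropped
    0,                 # zero entry        -> dropped by the r != 0 test
]

# Same filter shape as the new line above: index the last element, whatever the length.
filtered = [r for r in rewards_individual if (r != 0) and (r[len(r) - 1] < 1)]
print(filtered)  # [[0.2, 0.4, 0.9]]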
@@ -273,7 +273,8 @@ class WSNRoutingEnv(gym.Env):
  reward_number_of_packets = self.compute_reward_number_of_packets(action)

  # return [reward_angle, reward_distance, reward_consumption_energy, reward_dispersion_remaining_energy, reward_number_of_packets]
- return [reward_angle, reward_distance, reward_consumption_energy, reward_number_of_packets]
+ # return [reward_angle, reward_distance, reward_consumption_energy, reward_number_of_packets]
+ return [reward_angle, reward_distance, reward_number_of_packets]

  def network_reward_dispersion_remaining_energy(self):
  '''
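With this change the per-action reward drops the consumption-energy term and carries three components; the earlier hunk at line 133 then blends each remaining component 50/50 with the network-level terms. A rough sketch of that composition, with stand-in scalars for the two network_* methods (the real values come from the environment's state):

# Stand-in values; in the environment these come from
# network_reward_consumption_energy() and network_reward_dispersion_remaining_energy().
net_consumption_energy = 0.1
net_dispersion_energy = 0.2

# Three-component per-action reward, matching the new return statement:
# [reward_angle, reward_distance, reward_number_of_packets]
rewards = [0.8, 0.6, 0.9]

# Blending from the @@ -133 hunk: each component is averaged with the network terms.
rewards = [0.5 * r + 0.5 * (net_consumption_energy + net_dispersion_energy) for r in rewards]
print(rewards)  # roughly [0.55, 0.45, 0.6]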
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 2.0.76
+ Version: 2.0.78
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=udYZkpCQxSAsM5cZAiH_bjCIVJW_7mVBgej_95NoItw,193
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=Tw8Fcib96WJ2HWQYibc4wLBkyl6uui9x0Irpd8iYV-c,16174
+ gym_examples-2.0.78.dist-info/METADATA,sha256=UHbxTHKAOpaZdy3HqGEavxDjT91AQ-nC1x9XYvcXAp0,411
+ gym_examples-2.0.78.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-2.0.78.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-2.0.78.dist-info/RECORD,,
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=-v2IXwiGqmUdUbBPaWBQUoKOGvvn0Je7GdQXoOb1AnQ,193
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=67-7LtO0nEd1UBSdX16Sdwfb3Es4liOYAOQznrQ9zwk,16090
- gym_examples-2.0.76.dist-info/METADATA,sha256=LjOKaPdP_a29GaEcjdZmn9icpUopSRDORqvPrYDxwb8,411
- gym_examples-2.0.76.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-2.0.76.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-2.0.76.dist-info/RECORD,,