gym-examples 2.0.103__py3-none-any.whl → 2.0.105__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "2.0.103"
+__version__ = "2.0.105"
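This hunk only bumps `__version__`, but it closes the `register(...)` call that makes the environment visible to Gym. A minimal usage sketch, assuming the classic `gym` API (the code may instead import gymnasium): the registered id is not shown in this diff, so "gym_examples/WSNRoutingEnv-v0" below is an assumption used only to illustrate how a registered env is constructed.

import gym  # the env code subclasses gym.Env
import gym_examples  # importing the package runs the register(...) call above

# "gym_examples/WSNRoutingEnv-v0" is an assumed id; check gym_examples/__init__.py
# for the real one. max_episode_steps=50 means gym wraps the env in a TimeLimit.
env = gym.make("gym_examples/WSNRoutingEnv-v0")
obs = env.reset()  # recent gym/gymnasium versions return (obs, info) instead
env.close()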
gym_examples/envs/wsn_env.py CHANGED
@@ -137,7 +137,7 @@ class WSNRoutingEnv(gym.Env):
             rewards[i] = final_reward
         # rewards = [0.5 * r + 0.5 * (1/self.n_sensors) * (self.network_reward_consumption_energy() + self.network_reward_dispersion_remaining_energy()) for r in rewards]
         # rewards = [0.5 * r + 0.5 * (self.network_reward_consumption_energy() + self.network_reward_dispersion_remaining_energy()) for r in rewards]
-
+        rewards = 0.5 * np.sum(rewards) + 0.5 * (self.network_reward_consumption_energy() + self.network_reward_dispersion_remaining_energy())
         # Only proceed if network consumption energy is not zero to avoid unnecessary list comprehension
         # self.rewards_individual = [r for r in self.rewards_individual if ((r != 0) and (r[len(r) -1] < 1))]
 
@@ -285,7 +285,7 @@ class WSNRoutingEnv(gym.Env):
         reward_dispersion_remaining_energy = self.compute_reward_dispersion_remaining_energy()
         reward_number_of_packets = self.compute_reward_number_of_packets(action)
 
-        return [reward_angle, reward_consumption_energy, reward_dispersion_remaining_energy]
+        return [reward_angle, reward_consumption_energy, reward_dispersion_remaining_energy, reward_number_of_packets]
         # return [reward_angle, reward_distance, reward_consumption_energy, reward_number_of_packets]
         # return [reward_angle, reward_distance, reward_dispersion_remaining_energy, reward_number_of_packets]
         # return [reward_angle, reward_distance, reward_consumption_energy, reward_dispersion_remaining_energy]
{gym_examples-2.0.103.dist-info → gym_examples-2.0.105.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.103
+Version: 2.0.105
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-2.0.105.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=d42Xiyw7DYwtga2HWDZtaY9dBxqVxsiLZ62L2uCbSPo,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=nBvLmEcR_f822dt6E85kmQRw5VrndUZe6iYFcLn4tdU,17331
+gym_examples-2.0.105.dist-info/METADATA,sha256=u8x5g10kkCBjD5y0_wtZ3wmhcPjqR0jpUFrmmaRRqgw,412
+gym_examples-2.0.105.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.105.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.105.dist-info/RECORD,,
gym_examples-2.0.103.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=15VEcOpMueBHNYHIt3EDlO6FrzL8mFhMJqyL0AZRS30,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=1OYfeewuK4q3pjSlrIrLfq4AE5vEYwtPmcfafondjkk,17171
-gym_examples-2.0.103.dist-info/METADATA,sha256=QiWeGHxC4txHuYbR5smnseOeTQ2d0RjvJxVLjSpRT-U,412
-gym_examples-2.0.103.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.103.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.103.dist-info/RECORD,,
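Each RECORD row has the standard wheel form `path,sha256=<digest>,<size>`, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped. The size column also shows wsn_env.py growing from 17171 to 17331 bytes, consistent with the added reward lines above. A minimal sketch for recomputing an entry from an unpacked wheel (the local path is an assumption):

import base64
import hashlib
from pathlib import Path

def record_hash(path):
    # Wheel RECORD encoding: urlsafe base64 of the raw SHA-256 digest,
    # with the trailing '=' padding removed.
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 2.0.105 wheel this should print
# sha256=nBvLmEcR_f822dt6E85kmQRw5VrndUZe6iYFcLn4tdU
print(record_hash("gym_examples/envs/wsn_env.py"))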