gym-examples 2.0.53-py3-none-any.whl → 2.0.55-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
  max_episode_steps=50,
  )
 
- __version__ = "2.0.53"
+ __version__ = "2.0.55"
gym_examples/envs/wsn_env.py CHANGED
@@ -117,9 +117,10 @@ class WSNRoutingEnv(gym.Env):
  # final_reward = net(rewards_individual)
  final_reward = np.sum(rewards[i])
  rewards[i] = final_reward
-
+ print("\n=================================================")
+ print(f"Rewards: {[[rewards[i], self.network_reward_consumption_energy(), self.network_reward_dispersion_remaining_energy()] for i in range(self.n_sensors)]}")
  rewards = [0.5 * r + 0.5 * (1/self.n_sensors) * (self.network_reward_consumption_energy() + self.network_reward_dispersion_remaining_energy()) for r in rewards]
-
+ print("==================================================\n")
  for i in range(self.n_sensors):
      if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
          dones[i] = True
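The rewards line in this hunk blends each sensor's individual reward with a shared network-level term, split 50/50. A standalone sketch of that blend with made-up numbers (blend_rewards and the sample values are illustrative, not part of the package):

    def blend_rewards(rewards, net_consumption, net_dispersion, n_sensors):
        # r' = 0.5 * r + 0.5 * (1 / n_sensors) * (net_consumption + net_dispersion)
        shared = 0.5 * (1.0 / n_sensors) * (net_consumption + net_dispersion)
        return [0.5 * r + shared for r in rewards]

    # Two sensors with individual rewards 0.2 and 0.8;
    # network terms 0.6 (consumption) and 0.4 (dispersion):
    print(blend_rewards([0.2, 0.8], 0.6, 0.4, n_sensors=2))  # [0.35, 0.65]

Because the shared term is identical for every sensor, it shifts all rewards by the same amount; only the individual half differentiates the agents.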
@@ -211,12 +212,14 @@ class WSNRoutingEnv(gym.Env):
  distance = np.linalg.norm(self.sensor_positions[i] - self.sensor_positions[action])
  transmission_energy = self.transmission_energy(self.number_of_packets[i], distance)
  reception_energy = self.reception_energy(self.number_of_packets[i])
- total_energy = transmission_energy + reception_energy
+ # total_energy = transmission_energy + reception_energy
+ total_energy = transmission_energy
 
  # Normalize the total energy consumption
  max_transmission_energy = self.transmission_energy(self.n_sensors * initial_number_of_packets, self.coverage_radius)
  max_reception_energy = self.reception_energy(self.n_sensors * initial_number_of_packets)
- max_total_energy = max_transmission_energy + max_reception_energy
+ # max_total_energy = max_transmission_energy + max_reception_energy
+ max_total_energy = max_transmission_energy
  normalized_total_energy = total_energy / max_total_energy
 
  return np.clip(1 - normalized_total_energy, 0, 1)
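Before and after this change, the reward is one minus the energy spend normalized by a worst-case bound, clipped into [0, 1]; 2.0.55 drops reception energy from both the spend and the bound, so only transmission cost moves the reward. A minimal sketch with illustrative numbers (energy_reward and the values are assumptions, not taken from the package):

    import numpy as np

    def energy_reward(total_energy, max_total_energy):
        # Linear penalty: spend nothing -> reward 1; hit the worst case -> reward 0.
        normalized = total_energy / max_total_energy
        return float(np.clip(1 - normalized, 0, 1))

    # e.g. a transmission costing 0.3 J against a 2.0 J worst-case bound:
    print(energy_reward(0.3, 2.0))  # 0.85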
{gym_examples-2.0.53.dist-info → gym_examples-2.0.55.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 2.0.53
+ Version: 2.0.55
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
gym_examples-2.0.55.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=vnWhhwCczJ-SFYD9e9tkVw0v8gya25yTvJHPjBeSm-U,193
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=aoL7il1bOsPoyBp7HchTRRpumVMIiBAD7kDD53jIck0,15411
+ gym_examples-2.0.55.dist-info/METADATA,sha256=BAqN3wGi55dKSDpBVPjBbSXbCMccK7M8jIO2Ry1AnXY,411
+ gym_examples-2.0.55.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-2.0.55.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-2.0.55.dist-info/RECORD,,
gym_examples-2.0.53.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=0YWZrcieJEfliF4Ku7g2lo2v7dASNAvAZAD_3Xrl-w0,193
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=lf0L1bG5EVco12NmYdKIytQj8_jcrmGKNm8IaJ4ag94,15001
- gym_examples-2.0.53.dist-info/METADATA,sha256=helIqQBo4fn8L7nrBf3UIiWfhGC2G3hKkavpCE8fYMI,411
- gym_examples-2.0.53.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-2.0.53.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-2.0.53.dist-info/RECORD,,
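Each RECORD line is path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file with '=' padding stripped and the size is in bytes, per the wheel spec; that is why wsn_env.py's hash and size (15001 → 15411) both change along with the edits above. A sketch of how such a line is computed (record_entry is a hypothetical helper, not part of the package):

    import base64
    import hashlib

    def record_entry(path):
        # One RECORD line: path, urlsafe-base64 SHA-256 digest without
        # '=' padding (wheel spec), and the file size in bytes.
        with open(path, "rb") as f:
            data = f.read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return "{},sha256={},{}".format(path, digest.decode("ascii"), len(data))

    # Run against the unpacked 2.0.55 wheel, record_entry("gym_examples/envs/wsn_env.py")
    # should reproduce the RECORD line ending in ",15411" above.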