gym-examples 2.0.69__py3-none-any.whl → 2.0.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
  max_episode_steps=50,
  )

- __version__ = "2.0.69"
+ __version__ = "2.0.71"
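The hunk above only bumps __version__; the register() call it sits in (with max_episode_steps=50) is what makes the environment constructible through gym.make. A minimal usage sketch, assuming a hypothetical id "gym_examples/WSNRoutingEnv-v0", since the actual id argument lies outside this hunk:

    import gym
    import gym_examples  # noqa: F401 -- importing the package runs the register() call above

    # "gym_examples/WSNRoutingEnv-v0" is an assumed id, not confirmed by this diff
    env = gym.make("gym_examples/WSNRoutingEnv-v0")
    obs = env.reset()  # episodes are truncated after max_episode_steps=50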
gym_examples/envs/wsn_env.py CHANGED
@@ -109,6 +109,11 @@ class WSNRoutingEnv(gym.Env):
  # Calculate the energy consumption and remaining for transmitting data to the base station
  transmission_energy = self.transmission_energy(self.number_of_packets[i], self.distance_to_base[i])
  self.update_sensor_energies(i, transmission_energy)
+ # if np.sum(self.consumption_energy) != 0:
+ # print("\n=================================================")
+ # print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to the base station.")
+ # print(f"Consumption energy: {np.sum(self.consumption_energy)}")
+ # print("=================================================\n")
  rewards[i] = self.compute_individual_rewards(i, action)
  dones[i] = True
  else:
@@ -116,7 +121,12 @@ class WSNRoutingEnv(gym.Env):
  transmission_energy = self.transmission_energy(self.number_of_packets[i], distance)
  reception_energy = self.reception_energy(self.number_of_packets[i])
  self.update_sensor_energies(i, transmission_energy)
- self.update_sensor_energies(action, reception_energy)
+ self.update_sensor_energies(action, reception_energy)
+ # if np.sum(self.consumption_energy) != 0:
+ # print("\n=================================================")
+ # print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to sensor {action}.")
+ # print(f"Consumption energy: {np.sum(self.consumption_energy)}")
+ # print("=================================================\n")
  # Compute individual rewards
  rewards[i] = self.compute_individual_rewards(i, action)
  # Update the number of packets
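In this hunk the sender i is charged transmission_energy and the next hop (action) is charged reception_energy, with the new debug prints left commented out. The formulas behind transmission_energy and reception_energy are not part of this diff; the sketch below uses the common first-order radio model purely as an illustrative assumption, with constants that are not taken from wsn_env.py:

    # Illustrative first-order radio model; E_ELEC, EPS_AMP and PACKET_BITS are
    # assumed values, not the constants actually used by WSNRoutingEnv.
    E_ELEC = 50e-9       # J per bit spent by the radio electronics
    EPS_AMP = 100e-12    # J per bit per m^2 spent by the transmit amplifier
    PACKET_BITS = 4000   # assumed packet size in bits

    def transmission_energy(n_packets, distance):
        # Energy to send n_packets over `distance` metres (free-space path loss)
        return n_packets * PACKET_BITS * (E_ELEC + EPS_AMP * distance ** 2)

    def reception_energy(n_packets):
        # Energy to receive n_packets
        return n_packets * PACKET_BITS * E_ELEC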
@@ -134,6 +144,12 @@ class WSNRoutingEnv(gym.Env):

  # Call methods once and store results to avoid repeated calculations
  network_consumption_energy = self.network_reward_consumption_energy()
+ if np.sum(self.consumption_energy) != 0:
+ print("\n=================================================")
+ print(f"Total consumption energy: {np.sum(self.consumption_energy)}")
+ print(f"Network consumption energy: {network_consumption_energy}")
+ print(f"Network dispersion energy: {self.network_reward_dispersion_remaining_energy()}")
+ print("=================================================\n")
  network_dispersion_energy = self.network_reward_dispersion_remaining_energy()
  # Only proceed if network consumption energy is not zero to avoid unnecessary list comprehension
  if network_consumption_energy != 0:
@@ -301,10 +317,6 @@ class WSNRoutingEnv(gym.Env):
  Compute the reward based on the total energy consumption (transmission, reception) at the network level
  '''
  total_energy = np.sum(self.consumption_energy)
- if total_energy != 0:
- print("\n=================================================")
- print(f"Consumption_energy: {self.consumption_energy}")
- print("=================================================\n")

  # Normalize the total energy consumption
  max_transmission_energy = self.transmission_energy(self.n_sensors * initial_number_of_packets, self.coverage_radius)
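This hunk removes the debug prints from network_reward_consumption_energy(). The surrounding context shows the method summing self.consumption_energy and computing a worst-case bound (all n_sensors sending initial_number_of_packets over coverage_radius). A plausible shape of the normalization, sketched as a standalone function and an assumption, since the method's final line lies outside the hunk:

    import numpy as np

    def network_reward_consumption_energy(consumption_energy, max_transmission_energy):
        # Sum of per-sensor consumed energy, as in the context lines above
        total_energy = np.sum(consumption_energy)
        # Returning 1 minus the normalized consumption is an assumption about
        # the final step, not something shown in this diff
        return 1 - total_energy / max_transmission_energy

    # max_transmission_energy stands in for
    # transmission_energy(n_sensors * initial_number_of_packets, coverage_radius)
    print(network_reward_consumption_energy([0.2, 0.1, 0.05], max_transmission_energy=2.0))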
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 2.0.69
+ Version: 2.0.71
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
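Only the Version field changes in the wheel metadata. After upgrading, the installed version can be checked at runtime from the distribution name given in the Name field above; a short sketch:

    from importlib.metadata import version

    # "gym-examples" is the Name field from METADATA above
    print(version("gym-examples"))  # prints 2.0.71 once the new wheel is installed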
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=oIFKNEfjzmcQB27KrxtkT9n2F1EFKDl4m4tKpLiXJ00,193
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=IeJ9bEHxyvT7KsGadx2fbNQlT2ilRR82UOcAQPLgqbE,18135
+ gym_examples-2.0.71.dist-info/METADATA,sha256=3tIpIys5j-gJZrdJZmFRZLKn7IaMnzw44mqQe-RFGYA,411
+ gym_examples-2.0.71.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-2.0.71.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-2.0.71.dist-info/RECORD,,
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=f2TYKMfsb8pDtBFRryjIRdvSDx0BzVRDBUqHGypJhyA,193
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=TKcPJuoDpVKaQGDiF3XqJqUWItOaghc-xRfxycCK7bI,17069
- gym_examples-2.0.69.dist-info/METADATA,sha256=TP0AzKGqqlfAUONXrfuvcfjQI4rAL_r7TjruS4YzIIg,411
- gym_examples-2.0.69.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-2.0.69.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-2.0.69.dist-info/RECORD,,
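Each RECORD row has the form path,sha256=<digest>,size, where the digest is the file's SHA-256 encoded as unpadded urlsafe base64. A short sketch showing how one of these rows can be recomputed to verify an installed file against RECORD:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> str:
        # Rebuild a RECORD row: path, unpadded urlsafe-base64 SHA-256, size in bytes
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"

    # e.g. record_entry("gym_examples/envs/wsn_env.py") should reproduce the
    # wsn_env.py row in the new RECORD above (hash IeJ9..., size 18135)
    print(record_entry("gym_examples/envs/wsn_env.py"))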