gym-examples 2.0.72__py3-none-any.whl → 2.0.74__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "2.0.72"
+__version__ = "2.0.74"
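The register() call surrounding this hunk (its id and entry_point sit above line 6 and are unchanged) is what makes the environment constructible through Gym's registry. A minimal usage sketch, assuming a hypothetical id "gym_examples/WSNRoutingEnv-v0" since the real id is not visible in this diff:

    import gym
    import gym_examples  # importing the package runs the register() call above

    # Placeholder id; the actual id is defined in the part of __init__.py
    # that this hunk does not show.
    env = gym.make("gym_examples/WSNRoutingEnv-v0")
    obs = env.reset()  # episodes are truncated after max_episode_steps=50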
gym_examples/envs/wsn_env.py CHANGED
@@ -108,12 +108,7 @@ class WSNRoutingEnv(gym.Env):
             if action == self.n_sensors:
                 # Calculate the energy consumption and remaining for transmitting data to the base station
                 transmission_energy = self.transmission_energy(self.number_of_packets[i], self.distance_to_base[i])
-                self.update_sensor_energies(i, transmission_energy)
-                # if np.sum(self.consumption_energy) != 0:
-                #     print("\n=================================================")
-                #     print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to the base station.")
-                #     print(f"Consumption energy: {np.sum(self.consumption_energy)}")
-                #     print("=================================================\n")
+                self.update_sensor_energies(i, transmission_energy)
                 rewards[i] = self.compute_individual_rewards(i, action)
                 dones[i] = True
             else:
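transmission_energy() and reception_energy() are called here but defined outside the hunk. A common choice for such methods in WSN simulators is the first-order radio model; the sketch below is an illustration under that assumption, with made-up constants, not the package's actual implementation:

    # First-order radio model sketch; every constant here is an assumption.
    E_ELEC = 50e-9      # J/bit consumed by TX/RX electronics
    E_AMP = 100e-12     # J/bit/m^2 consumed by the TX amplifier
    PACKET_SIZE = 4000  # bits per packet

    def transmission_energy(n_packets, distance):
        # Energy grows linearly with payload and quadratically with distance.
        bits = n_packets * PACKET_SIZE
        return bits * E_ELEC + bits * E_AMP * distance ** 2

    def reception_energy(n_packets):
        # Receiving only pays the electronics cost.
        return n_packets * PACKET_SIZE * E_ELEC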
@@ -121,12 +116,7 @@ class WSNRoutingEnv(gym.Env):
                 transmission_energy = self.transmission_energy(self.number_of_packets[i], distance)
                 reception_energy = self.reception_energy(self.number_of_packets[i])
                 self.update_sensor_energies(i, transmission_energy)
-                self.update_sensor_energies(action, reception_energy)
-                # if np.sum(self.consumption_energy) != 0:
-                #     print("\n=================================================")
-                #     print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to sensor {action}.")
-                #     print(f"Consumption energy: {np.sum(self.consumption_energy)}")
-                #     print("=================================================\n")
+                self.update_sensor_energies(action, reception_energy)
                 # Compute individual rewards
                 rewards[i] = self.compute_individual_rewards(i, action)
                 # Update the number of packets
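In this forwarding branch the sender i is charged transmission_energy and the receiving sensor (action) is charged reception_energy. update_sensor_energies() itself lies outside the diff; a plausible sketch of the bookkeeping it implies (remaining_energy is an assumed attribute name, only consumption_energy is visible in the diff):

    def update_sensor_energies(self, idx, energy):
        # Accumulate consumption (feeds network_reward_consumption_energy)
        # and drain the sensor's battery (feeds the dispersion reward term).
        self.consumption_energy[idx] += energy
        self.remaining_energy[idx] -= energy  # assumed counterpart array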
@@ -144,12 +134,6 @@ class WSNRoutingEnv(gym.Env):
 
         # Call methods once and store results to avoid repeated calculations
         network_consumption_energy = self.network_reward_consumption_energy()
-        if np.sum(self.consumption_energy) != 0:
-            print("\n=================================================")
-            print(f"Total consumption energy: {np.sum(self.consumption_energy)}")
-            print(f"Network consumption energy: {network_consumption_energy}")
-            print(f"Network dispersion energy: {self.network_reward_dispersion_remaining_energy()}")
-            print("=================================================\n")
         network_dispersion_energy = self.network_reward_dispersion_remaining_energy()
         # Only proceed if network consumption energy is not zero to avoid unnecessary list comprehension
         if network_consumption_energy != 0:
@@ -317,14 +301,9 @@ class WSNRoutingEnv(gym.Env):
         Compute the reward based on the total energy consumption (transmission, reception) at the network level
         '''
         total_energy = np.sum(self.consumption_energy)
-        print(f"Inside network_reward_consumption_energy, total energy: {total_energy}")
         # Normalize the total energy consumption
-        max_transmission_energy = self.transmission_energy(self.n_sensors * initial_number_of_packets, self.coverage_radius)
-        print(f"Inside network_reward_consumption_energy, max transmission energy: {max_transmission_energy}")
-        max_reception_energy = self.reception_energy(self.n_sensors * initial_number_of_packets)
-        print(f"Inside network_reward_consumption_energy, max reception energy: {max_reception_energy}")
-        normalized_total_energy = total_energy / (max_transmission_energy + max_reception_energy)
-        print(f"Inside network_reward_consumption_energy, normalized total energy: {normalized_total_energy}")
+        max_total_energy = self.n_sensors * initial_energy
+        normalized_total_energy = total_energy / max_total_energy
 
         return np.clip(1 - normalized_total_energy, 0, 1)
 
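The new normalizer replaces the previous worst-case radio bound (every sensor transmitting and receiving its full initial packet load over the coverage radius) with the network's total battery budget, n_sensors * initial_energy, a hard upper bound on what can ever be consumed. Restated standalone for clarity, with initial_energy assumed to be the per-sensor starting battery defined elsewhere in the module:

    import numpy as np

    def network_reward_consumption_energy(consumption_energy, n_sensors, initial_energy):
        # Same computation as the method above, written with explicit
        # arguments instead of reads from self.
        total_energy = np.sum(consumption_energy)
        max_total_energy = n_sensors * initial_energy  # total battery budget
        return np.clip(1 - total_energy / max_total_energy, 0, 1)  # 1 = nothing spent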
{gym_examples-2.0.72.dist-info → gym_examples-2.0.74.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.72
+Version: 2.0.74
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-2.0.74.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=b6eULvkNLM9uMNqjXtAmWboWIPZ1ydCHLooKGpegjcA,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=nzkby8VP2ADq4CUebDExqQwElLjdpdMr-bfLiaXd1qc,16620
+gym_examples-2.0.74.dist-info/METADATA,sha256=IzDB951sQkhW7PDYOd2QWGJIJNnKzJSgg5NXhIWujBQ,411
+gym_examples-2.0.74.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.74.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.74.dist-info/RECORD,,
gym_examples-2.0.72.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=rTuZiKxgQrl6_lA1u6-ac60xg1CRFlJOlEX6GKSvEfk,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=SorBJOVj9Jz4x-VosrChGaaesorWcd7K-qJvwhIZFAA,18553
-gym_examples-2.0.72.dist-info/METADATA,sha256=6_b2HqQbQ_FccJaGcgcsDI2Z9LOFbPmuAdfXhRiBs3Q,411
-gym_examples-2.0.72.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.72.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.72.dist-info/RECORD,,
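For reference, each RECORD row is path,hash,size, where the hash is the URL-safe, unpadded base64 encoding of the file's SHA-256 digest per the wheel spec (the RECORD file itself leaves its hash and size fields empty, hence the trailing ",,"). Note that wsn_env.py shrinks from 18553 to 16620 bytes, consistent with the debug-code removal above. A sketch of how such a hash can be recomputed for verification:

    import base64
    import hashlib

    def record_hash(path):
        # URL-safe base64 of the SHA-256 digest, padding stripped, as used
        # in wheel RECORD files.
        digest = hashlib.sha256(open(path, "rb").read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")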