gym-examples 2.0.73__py3-none-any.whl → 2.0.75__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +2 -21
- {gym_examples-2.0.73.dist-info → gym_examples-2.0.75.dist-info}/METADATA +1 -1
- gym_examples-2.0.75.dist-info/RECORD +7 -0
- gym_examples-2.0.73.dist-info/RECORD +0 -7
- {gym_examples-2.0.73.dist-info → gym_examples-2.0.75.dist-info}/WHEEL +0 -0
- {gym_examples-2.0.73.dist-info → gym_examples-2.0.75.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py CHANGED
gym_examples/envs/wsn_env.py CHANGED
@@ -108,12 +108,7 @@ class WSNRoutingEnv(gym.Env):
             if action == self.n_sensors:
                 # Calculate the energy consumption and remaining for transmitting data to the base station
                 transmission_energy = self.transmission_energy(self.number_of_packets[i], self.distance_to_base[i])
-                self.update_sensor_energies(i, transmission_energy)
-                # if np.sum(self.consumption_energy) != 0:
-                # print("\n=================================================")
-                # print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to the base station.")
-                # print(f"Consumption energy: {np.sum(self.consumption_energy)}")
-                # print("=================================================\n")
+                self.update_sensor_energies(i, transmission_energy)
                 rewards[i] = self.compute_individual_rewards(i, action)
                 dones[i] = True
             else:
@@ -121,12 +116,7 @@ class WSNRoutingEnv(gym.Env):
                 transmission_energy = self.transmission_energy(self.number_of_packets[i], distance)
                 reception_energy = self.reception_energy(self.number_of_packets[i])
                 self.update_sensor_energies(i, transmission_energy)
-                self.update_sensor_energies(action, reception_energy)
-                # if np.sum(self.consumption_energy) != 0:
-                # print("\n=================================================")
-                # print(f"Sensor {i} transmits {self.number_of_packets[i]} packets to sensor {action}.")
-                # print(f"Consumption energy: {np.sum(self.consumption_energy)}")
-                # print("=================================================\n")
+                self.update_sensor_energies(action, reception_energy)
                 # Compute individual rewards
                 rewards[i] = self.compute_individual_rewards(i, action)
                 # Update the number of packets
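Both hunks above strip commented-out debug prints from the per-sensor routing branch; the energy bookkeeping that remains is unchanged. A minimal sketch of that surviving control flow is given below, for orientation only: the wrapper function, the distance argument, and the done handling in the forwarding branch are assumptions not shown in the diff context, while the helper names (transmission_energy, reception_energy, update_sensor_energies, compute_individual_rewards) are taken from the context lines.

def route_packet_sketch(env, i, action, distance):
    """Hedged sketch of the surviving branch logic in WSNRoutingEnv.

    `env` stands in for the environment instance; only attributes visible in the
    diff context are used, and reward/done containers are simplified to return values.
    """
    if action == env.n_sensors:
        # Transmit directly to the base station: only the sender spends energy.
        tx = env.transmission_energy(env.number_of_packets[i], env.distance_to_base[i])
        env.update_sensor_energies(i, tx)
        reward = env.compute_individual_rewards(i, action)
        done = True
    else:
        # Forward to sensor `action`: sender pays transmission, receiver pays reception.
        tx = env.transmission_energy(env.number_of_packets[i], distance)
        rx = env.reception_energy(env.number_of_packets[i])
        env.update_sensor_energies(i, tx)
        env.update_sensor_energies(action, rx)
        reward = env.compute_individual_rewards(i, action)
        done = False  # assumption: the diff does not show done handling for this branch
    return reward, done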
@@ -144,12 +134,6 @@ class WSNRoutingEnv(gym.Env):
 
         # Call methods once and store results to avoid repeated calculations
         network_consumption_energy = self.network_reward_consumption_energy()
-        if np.sum(self.consumption_energy) != 0:
-            print("\n=================================================")
-            print(f"Total consumption energy: {np.sum(self.consumption_energy)}")
-            print(f"Network consumption energy: {network_consumption_energy}")
-            print(f"Network dispersion energy: {self.network_reward_dispersion_remaining_energy()}")
-            print("=================================================\n")
         network_dispersion_energy = self.network_reward_dispersion_remaining_energy()
         # Only proceed if network consumption energy is not zero to avoid unnecessary list comprehension
         if network_consumption_energy != 0:
@@ -317,12 +301,9 @@ class WSNRoutingEnv(gym.Env):
         Compute the reward based on the total energy consumption (transmission, reception) at the network level
         '''
         total_energy = np.sum(self.consumption_energy)
-        print(f"Inside network_reward_consumption_energy, total energy: {total_energy}")
         # Normalize the total energy consumption
         max_total_energy = self.n_sensors * initial_energy
-        print(f"Inside network_reward_consumption_energy, max total energy: {max_total_energy}")
         normalized_total_energy = total_energy / max_total_energy
-        print(f"Inside network_reward_consumption_energy, normalized total energy: {normalized_total_energy}")
 
         return np.clip(1 - normalized_total_energy, 0, 1)
 
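The last hunk removes tracing prints from the network-level consumption reward; the logic that remains is a single normalisation: total consumed energy divided by the theoretical budget n_sensors * initial_energy, inverted and clipped to [0, 1]. A standalone re-statement is sketched below, with initial_energy passed explicitly rather than read from module scope as it appears to be in the original.

import numpy as np

def network_reward_consumption_energy(consumption_energy, n_sensors, initial_energy):
    # Reward is 1.0 when nothing has been consumed, 0.0 once the whole network budget is spent.
    total_energy = np.sum(consumption_energy)
    max_total_energy = n_sensors * initial_energy
    normalized_total_energy = total_energy / max_total_energy
    return np.clip(1 - normalized_total_energy, 0, 1)

# Example: 10 sensors with 1.0 unit of initial energy each and 2.5 units consumed -> 0.75
print(network_reward_consumption_energy([0.5, 2.0], n_sensors=10, initial_energy=1.0))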
gym_examples-2.0.75.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=KhqtzHp8-RurAAUjaZuxNFkP_xrA_6fX7nBcNDPOEfs,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=nzkby8VP2ADq4CUebDExqQwElLjdpdMr-bfLiaXd1qc,16620
+gym_examples-2.0.75.dist-info/METADATA,sha256=dUOLYGe9cogkJyf0aCriNRNVF1YcGYkwHLghm0FV6lY,411
+gym_examples-2.0.75.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.75.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.75.dist-info/RECORD,,
gym_examples-2.0.73.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=LnsUr0dHN4iXm9U9y-GKicgaPBWok_Yw28IYzLsF2nA,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=XSB2_uh4XSYJ8900Y91Hh4R3r37SeKsPZZHxU5uuQsk,18237
-gym_examples-2.0.73.dist-info/METADATA,sha256=kg5dqMd3x68DFKeALLv0nD021r4gsivWJpx-6hJCI0Y,411
-gym_examples-2.0.73.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.73.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.73.dist-info/RECORD,,
{gym_examples-2.0.73.dist-info → gym_examples-2.0.75.dist-info}/WHEEL
File without changes
{gym_examples-2.0.73.dist-info → gym_examples-2.0.75.dist-info}/top_level.txt
File without changes
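The RECORD entries above follow the standard wheel manifest format: file path, an urlsafe-base64 SHA-256 digest with padding stripped, and file size in bytes; the changed hash and size for wsn_env.py (18237 → 16620 bytes) reflect the deleted debug code. The sketch below shows how such an entry can be recomputed from a local file for verification; the function name and example usage are illustrative.

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    # Build a wheel RECORD line: "<path>,sha256=<urlsafe-b64 digest, no padding>,<size>"
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

# e.g. record_entry("gym_examples/envs/wsn_env.py") should reproduce the RECORD line
# above if the local copy matches the published 2.0.75 wheel.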