gym-examples 3.0.176__py3-none-any.whl → 3.0.178__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +50 -0
- {gym_examples-3.0.176.dist-info → gym_examples-3.0.178.dist-info}/METADATA +1 -1
- gym_examples-3.0.178.dist-info/RECORD +7 -0
- gym_examples-3.0.176.dist-info/RECORD +0 -7
- {gym_examples-3.0.176.dist-info → gym_examples-3.0.178.dist-info}/WHEEL +0 -0
- {gym_examples-3.0.176.dist-info → gym_examples-3.0.178.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py
CHANGED
gym_examples/envs/wsn_env.py
CHANGED
@@ -27,6 +27,8 @@ latency_per_hop = 1 # latency per hop in seconds
 coef_network_throughput = 2.6 # coefficient for the network throughput reward
 coef_packet_delivery_ratio = 1.6 # coefficient for the packet delivery ratio reward
 
+base_back_up_dir = "results/data/"
+
 # Define the final reward function using an attention mechanism
 class Attention(nn.Module):
     def __init__(self, input_dim, output_dim):
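The new module-level constant base_back_up_dir is consumed later in step() as the prefix for the np.save calls. One operational note, offered as a hedged sketch rather than anything wsn_env.py itself does: np.save does not create intermediate directories, so results/data/ must exist before the first save.

import os

base_back_up_dir = "results/data/"
# np.save raises FileNotFoundError if the target directory is missing, so a
# caller can create it up front; exist_ok=True makes the call idempotent.
# This safeguard is an illustration only, not part of the package.
os.makedirs(base_back_up_dir, exist_ok=True)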
@@ -56,6 +58,19 @@ class WSNRoutingEnv(gym.Env):
         self.log_done = None # Log the done status of the environment
         self.log_action = None # Log the action taken by the agent
         self.log_steps = None # Log the number of steps taken by the agent
+
+        # Initialize list of episode metrics
+        episode_return = None
+        self.number_of_steps = 0 # Total number of steps taken by the agent since the beginning of the training
+        self.episode_returns = []
+        self.episode_std_remaining_energy = []
+        self.episode_mean_remaining_energy = []
+        self.episode_total_consumption_energy = []
+        self.episode_network_throughput = []
+        self.episode_packet_delivery_ratio = []
+        self.episode_network_lifetime = []
+        self.episode_average_latency = []
+
         self.n_sensors = n_sensors
         self.n_agents = n_sensors
         self.coverage_radius = coverage_radius
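Note that episode_return = None in __init__ binds a bare local that is discarded when the constructor returns, and the later hunks read and write episode_return as a plain name in reset() and step(). Unless a module-level global not visible in this diff backs that name, step()'s episode_return += rewards would raise NameError, and adding a list of rewards to a number would raise TypeError in any case. A minimal sketch of an instance-attribute alternative, assuming per-step rewards arrive as a list of floats; the class name and methods here are hypothetical, not the package's API:

import numpy as np

class EpisodeTracker:
    """Sketch only: accumulate a per-episode return as instance state."""
    def __init__(self):
        self.episode_return = 0.0   # running return of the current episode
        self.episode_returns = []   # one entry per completed episode

    def on_step(self, rewards):
        # Sum the per-sensor rewards before accumulating; adding the raw
        # list to a float would raise TypeError.
        self.episode_return += float(np.sum(rewards))

    def on_reset(self):
        # Archive the finished episode, then start a fresh accumulator.
        self.episode_returns.append(self.episode_return)
        self.episode_return = 0.0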
@@ -75,7 +90,18 @@ class WSNRoutingEnv(gym.Env):
         self.reset()
 
     def reset(self):
+
+        episode_return = 0
+
         if self.episode_count > 1 and os.getenv('PRINT_STATS') == 'True':
+            self.episode_returns.append(episode_return)
+            self.episode_std_remaining_energy.append(np.std(self.remaining_energy))
+            self.episode_mean_remaining_energy.append(np.mean(self.remaining_energy))
+            self.episode_total_consumption_energy.append(np.sum(initial_energy - self.remaining_energy))
+            self.episode_network_throughput.append(self.network_throughput)
+            self.episode_packet_delivery_ratio.append(self.packet_delivery_ratio)
+            self.episode_network_lifetime.append(self.network_lifetime)
+            self.episode_average_latency.append(self.average_latency)
             print(f"Episode: {self.episode_count}")
             print(f"This episode ends with # Steps: {self.log_steps}")
             print(f"This episode ends with Done: {self.log_done}")
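This block archives the previous episode's metrics before reset() clears state, but as the hunk is ordered, episode_return is set to 0 immediately before it is appended, so the archived return is always 0 as written. The block also references initial_energy, presumably a module-level constant defined earlier in wsn_env.py and not shown in this diff. For illustration, the three energy statistics it records can be reproduced on a toy remaining-energy vector, assuming a scalar initial_energy; the values below are placeholders:

import numpy as np

# Illustrative values only; in wsn_env.py both names come from the module.
initial_energy = 1.0
remaining_energy = np.array([0.40, 0.70, 0.55, 0.90])

std_remaining = np.std(remaining_energy)                     # spread across sensors
mean_remaining = np.mean(remaining_energy)                   # average residual energy
total_consumed = np.sum(initial_energy - remaining_energy)   # energy used so far
print(std_remaining, mean_remaining, total_consumed)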
@@ -106,6 +132,8 @@ class WSNRoutingEnv(gym.Env):
     def step(self, actions):
         actions = [actions[i] for i in range(self.n_agents)] # We want to go back from the MultiDiscrete action space to a tuple of tuple of Discrete action spaces
         self.steps += 1
+        if os.getenv('PRINT_STATS') == 'True':
+            self.number_of_steps += 1
     rewards = [0] * self.n_sensors
         dones = [False] * self.n_sensors
         for i, action in enumerate(actions):
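Statistics collection is toggled through the PRINT_STATS environment variable, compared as a raw string, so only the exact value 'True' enables it; 'true' or '1' would not match. A usage sketch, assuming the variable is set before the environment is constructed:

import os

os.environ["PRINT_STATS"] = "True"   # must be the exact string 'True'

if os.getenv("PRINT_STATS") == "True":
    print("per-step and per-episode statistics will be recorded")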
@@ -192,6 +220,28 @@ class WSNRoutingEnv(gym.Env):
         self.log_done = dones
         self.log_action = actions
         self.log_steps = self.steps
+        episode_return += rewards
+
+        if os.getenv('PRINT_STATS') == 'True': # We are trying to extract only the statistics for the PPO algorithm
+            if self.number_of_steps >= num_timesteps:
+                self.episode_returns.append(episode_return)
+                self.episode_std_remaining_energy.append(np.std(self.remaining_energy))
+                self.episode_mean_remaining_energy.append(np.mean(self.remaining_energy))
+                self.episode_total_consumption_energy.append(np.sum(initial_energy - self.remaining_energy))
+                self.episode_network_throughput.append(self.network_throughput)
+                self.episode_packet_delivery_ratio.append(self.packet_delivery_ratio)
+                self.episode_network_lifetime.append(self.network_lifetime)
+                self.episode_average_latency.append(self.average_latency)
+
+                np.save(f"{base_back_up_dir}returns_QMIX_{version}.npy", np.array(self.episode_returns))
+                np.save(f"{base_back_up_dir}std_remaining_energy_QMIX_{version}.npy", np.array(self.episode_std_remaining_energy))
+                np.save(f"{base_back_up_dir}total_consumption_energy_QMIX_{version}.npy", np.array(self.episode_total_consumption_energy))
+                np.save(f"{base_back_up_dir}mean_remaining_energy_QMIX_{version}.npy", np.array(self.episode_mean_remaining_energy))
+                np.save(f"{base_back_up_dir}network_throughput_QMIX_{version}.npy", np.array(self.episode_network_throughput))
+                np.save(f"{base_back_up_dir}packet_delivery_ratio_QMIX_{version}.npy", np.array(self.episode_packet_delivery_ratio))
+                np.save(f"{base_back_up_dir}network_lifetime_QMIX_{version}.npy", np.array(self.episode_network_lifetime))
+                np.save(f"{base_back_up_dir}average_latency_QMIX_{version}.npy", np.array(self.episode_average_latency))
+
         return self._get_obs(), rewards, dones, {}
 
     def _get_obs(self):
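Once number_of_steps crosses num_timesteps, each metric list is dumped to its own .npy file. Both num_timesteps and version are assumed to be defined elsewhere in wsn_env.py; neither appears in this diff. Also worth flagging: the file names carry a QMIX tag even though the inline comment mentions PPO. A hedged sketch of reading the arrays back for analysis, with a placeholder version string:

import numpy as np

base_back_up_dir = "results/data/"
version = "v1"   # placeholder; mirror whatever tag wsn_env.py saved with

returns = np.load(f"{base_back_up_dir}returns_QMIX_{version}.npy")
latency = np.load(f"{base_back_up_dir}average_latency_QMIX_{version}.npy")
print(f"{returns.size} episodes, mean return {returns.mean():.3f}, "
      f"mean latency {latency.mean():.3f}")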
gym_examples-3.0.178.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=dKB7fxf-b5wOgFhNlcNdhFqGY0_sagdM3FVbJyd2xBM,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=MsDYLY0-6ngqgziKcJ5BET701GWX6OU1qoh3FMTezcs,26238
+gym_examples-3.0.178.dist-info/METADATA,sha256=zRHR4aBsr9tc5dwMeqysMfhLm8823FOnSCj7tph9DaM,412
+gym_examples-3.0.178.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.178.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.178.dist-info/RECORD,,
gym_examples-3.0.176.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=i7wmRRD6heJrXozHrhxzTPi1vwiSSWbwl0KImDTLlHI,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=YqXpYZXdd5lW3m2Hohog4iirLHORObEECs3QVr341Rg,22968
-gym_examples-3.0.176.dist-info/METADATA,sha256=TTwYP8pX18ClNKCpUqR0oHggJLZgcrzxl5pV7VSn3t0,412
-gym_examples-3.0.176.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.176.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.176.dist-info/RECORD,,
{gym_examples-3.0.176.dist-info → gym_examples-3.0.178.dist-info}/WHEEL
File without changes
{gym_examples-3.0.176.dist-info → gym_examples-3.0.178.dist-info}/top_level.txt
File without changes
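Each RECORD row has the form path,sha256=<digest>,size, where the digest is urlsafe base64 with trailing '=' padding stripped, per the wheel RECORD spec. The wsn_env.py entry grew from 22968 to 26238 bytes, consistent with the instrumentation added above. A small sketch for recomputing an entry's hash locally; the function name is hypothetical:

import base64
import hashlib

def record_hash(path: str) -> str:
    # RECORD stores sha256 digests urlsafe-base64-encoded without '=' padding.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Example: record_hash("gym_examples/envs/wsn_env.py") should match the value
# recorded for 3.0.178 if the installed file is intact.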