gym-examples 3.0.81__py3-none-any.whl → 3.0.83__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +9 -6
- {gym_examples-3.0.81.dist-info → gym_examples-3.0.83.dist-info}/METADATA +1 -1
- gym_examples-3.0.83.dist-info/RECORD +7 -0
- gym_examples-3.0.81.dist-info/RECORD +0 -7
- {gym_examples-3.0.81.dist-info → gym_examples-3.0.83.dist-info}/WHEEL +0 -0
- {gym_examples-3.0.81.dist-info → gym_examples-3.0.83.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py
CHANGED
gym_examples/envs/wsn_env.py
CHANGED
@@ -10,7 +10,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 # Define the network parameters for the final reward function
-input_dim =
+input_dim = 7 # length of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
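The new `input_dim = 7` pins the reward-aggregation network's input size to the length of the combined rewards vector (four energy terms plus three performance terms, per the return-value change further below), while `output_dim = 1` keeps the final reward scalar. The network definition itself is outside this hunk; the following is a minimal sketch of what a 7-to-1 aggregator built on these parameters might look like. The hidden width, the `Sigmoid` squashing, and the name `final_reward_net` are assumptions, not taken from the package source.

import torch
import torch.nn as nn

input_dim = 7   # length of the individual rewards vector
output_dim = 1  # final reward

# Hypothetical aggregator: maps the 7 individual rewards to one scalar.
# Hidden width (16) and the Sigmoid are illustrative choices only.
final_reward_net = nn.Sequential(
    nn.Linear(input_dim, 16),
    nn.ReLU(),
    nn.Linear(16, output_dim),
    nn.Sigmoid(),  # keeps the final reward in [0, 1]
)

rewards = torch.rand(input_dim)           # stand-in for the concatenated rewards vector
final_reward = final_reward_net(rewards)  # single scalar reward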
@@ -65,8 +65,7 @@ class WSNRoutingEnv(gym.Env):
         self.reset()
 
 
-    def reset(self):
-
+    def reset(self):
         self.sensor_positions = np.random.rand(self.n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
         self.distance_to_base = np.linalg.norm(self.sensor_positions - base_station_position, axis=1)
         self.remaining_energy = np.ones(self.n_sensors) * initial_energy
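For context, the unchanged lines above do the per-episode initialization: sensors are placed uniformly at random inside the square [lower_bound, upper_bound]², distances to the base station are precomputed with a vector norm, and every node starts at full energy. A standalone sketch of the same arithmetic follows; the concrete values for `n_sensors`, the bounds, `base_station_position`, and `initial_energy` are chosen for illustration only, since the real constants live elsewhere in wsn_env.py.

import numpy as np

# Illustrative values; the package defines its own constants at module level.
n_sensors = 10
lower_bound, upper_bound = 0.0, 100.0
base_station_position = np.array([50.0, 50.0])
initial_energy = 1.0  # assumed, in joules

# np.random.rand draws from [0, 1); scaling by the side length and shifting
# by lower_bound maps each draw into the deployment square.
sensor_positions = np.random.rand(n_sensors, 2) * (upper_bound - lower_bound) + lower_bound
distance_to_base = np.linalg.norm(sensor_positions - base_station_position, axis=1)
remaining_energy = np.ones(n_sensors) * initial_energy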
@@ -308,9 +307,13 @@ class WSNRoutingEnv(gym.Env):
         reward_packet_delivery_ratio = self.compute_reward_packet_delivery_ratio()
 
         rewards_performance = np.array([reward_latency, reward_network_throughput, reward_packet_delivery_ratio])
-
-
-
+        if self.episode_count == 455:
+            print(f"Sensor: {i}")
+            print(f"Episode: {self.episode_count}")
+            print(f"Rewards energy: {rewards_energy}")
+            print(f"Rewards performance: {rewards_performance}")
+        return np.concatenate((rewards_energy, rewards_performance))
+        # return rewards_energy
 
     def network_reward_dispersion_remaining_energy(self):
         '''
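This hunk adds debug printing for a single hard-coded episode (455) and makes the method return the energy and performance rewards concatenated into one vector, with the previous energy-only return kept as a comment. The concatenation is what makes `input_dim = 7` in the first hunk consistent: `rewards_performance` contributes three elements (latency, throughput, packet delivery ratio), so `rewards_energy` must contribute the remaining four. A hedged illustration of the resulting shape, with made-up reward values:

import numpy as np

# Assumed: rewards_energy has 4 entries so that, together with the 3
# performance entries, the concatenated vector matches input_dim = 7.
rewards_energy = np.array([0.2, 0.5, 0.1, 0.9])  # hypothetical values
rewards_performance = np.array([0.3, 0.7, 0.6])  # latency, throughput, PDR

combined = np.concatenate((rewards_energy, rewards_performance))
assert combined.shape == (7,)  # length of the individual rewards vector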
gym_examples-3.0.83.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=QDtvnXb2Mx55f1nDFtydZEG4rBrtql9NsdptzX8iERo,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=kvQViM19DyTdlmkIAUs_WL2UWIk46EiEV5LrWPCekUA,20630
+gym_examples-3.0.83.dist-info/METADATA,sha256=YEoAiSUMbHaInr_ZMK0Nb6XLEByt9O9RfVefbAv1rJs,411
+gym_examples-3.0.83.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.83.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.83.dist-info/RECORD,,
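Each RECORD row has the form path,sha256=digest,size, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing padding stripped, per the wheel spec; RECORD itself carries no hash or size, hence the trailing ,,. A small sketch for recomputing one digest and checking it against a row (the usage path and the asserted digest are taken from the hunk above, but verifying it requires an installed copy of this exact version):

import base64
import hashlib

def record_digest(path: str) -> str:
    """Return the sha256= digest that a wheel's RECORD stores for this file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    # URL-safe base64 with '=' padding removed, per the wheel spec.
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Hypothetical usage against an installed copy of gym-examples 3.0.83:
# assert record_digest("gym_examples/envs/wsn_env.py") == \
#     "kvQViM19DyTdlmkIAUs_WL2UWIk46EiEV5LrWPCekUA"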
gym_examples-3.0.81.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=-tNBdDGkUAe9gRz0t2fK_jAloQqZCns0wnEGIXGdB_s,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=onqDtWhDb4uXQtcpWZFILFujrH-AbVZXluV-pgYi3nA,20385
-gym_examples-3.0.81.dist-info/METADATA,sha256=Id-4MFw_RH7WUdIcUlhbAsjDQMX05OIilQgWzKJ1h00,411
-gym_examples-3.0.81.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.81.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.81.dist-info/RECORD,,
{gym_examples-3.0.81.dist-info → gym_examples-3.0.83.dist-info}/WHEEL
File without changes
{gym_examples-3.0.81.dist-info → gym_examples-3.0.83.dist-info}/top_level.txt
File without changes