gym-examples 3.0.50-py3-none-any.whl → 3.0.52-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +5 -4
- {gym_examples-3.0.50.dist-info → gym_examples-3.0.52.dist-info}/METADATA +1 -1
- gym_examples-3.0.52.dist-info/RECORD +7 -0
- gym_examples-3.0.50.dist-info/RECORD +0 -7
- {gym_examples-3.0.50.dist-info → gym_examples-3.0.52.dist-info}/WHEEL +0 -0
- {gym_examples-3.0.50.dist-info → gym_examples-3.0.52.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py
CHANGED
gym_examples/envs/wsn_env.py
CHANGED
@@ -10,7 +10,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 # Define the network parameters for the final reward function
-input_dim = 4 # length of the individual rewards vector
+# input_dim = 4 # length of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
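The only change in this hunk is that the module-level constant input_dim = 4 is commented out: the length of the individual-rewards vector is no longer hard-coded, but derived from the vector itself inside the attention-based reward computation (third hunk below). output_dim = 1 stays as a module-level constant.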
@@ -121,8 +121,8 @@ class WSNRoutingEnv(gym.Env):
                     self.total_latency += self.packet_latency[i] + latency_per_hop
                     self.packet_latency[i] = 0
 
-
-                    rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
+                    rewards[i] = self.compute_individual_rewards(i, action)
+                    # rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
                     dones[i] = True
                 else:
                     distance = np.linalg.norm(self.sensor_positions[i] - self.sensor_positions[action])
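Here the placeholder reward for delivering a packet to the base station, a constant np.ones(input_dim) vector, is replaced by the environment's own compute_individual_rewards(i, action). The length of that per-objective vector is what now determines input_dim downstream, which is why the global constant in the first hunk could be retired.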
@@ -363,8 +363,9 @@ class WSNRoutingEnv(gym.Env):
         '''
         Compute the attention-based rewards
         '''
+        input_dim = len(rewards)
         rewards = torch.tensor(rewards, dtype=torch.double)
-        net = Attention(
+        net = Attention(input_dim, output_dim)
         net = net.double() # Convert the weights to Double
         final_reward = net(rewards)
         return final_reward
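The Attention class itself is defined elsewhere in wsn_env.py and is not part of this diff, so the following is only a minimal sketch of a module consistent with its call sites here: constructed as Attention(input_dim, output_dim), converted with .double(), and applied to a 1-D double tensor of rewards. The layer structure and the example reward values are assumptions, not the package's actual implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    # Hypothetical stand-in for the Attention module defined in wsn_env.py.
    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.scores = nn.Linear(input_dim, input_dim)    # one attention score per reward component
        self.combine = nn.Linear(input_dim, output_dim)  # weighted components -> final reward

    def forward(self, rewards):
        weights = F.softmax(self.scores(rewards), dim=-1)  # normalize the attention weights
        return self.combine(weights * rewards)             # attention-weighted final reward

# Usage mirroring the patched code path: input_dim now tracks the rewards vector.
rewards = [0.2, 0.5, 0.1, 0.9]           # example individual rewards (made up)
input_dim = len(rewards)                  # replaces the hard-coded input_dim = 4
output_dim = 1                            # final reward, as in the module constants
rewards = torch.tensor(rewards, dtype=torch.double)
net = Attention(input_dim, output_dim)
net = net.double()                        # convert the weights to Double
final_reward = net(rewards)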
gym_examples-3.0.52.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=A6KLkBubgfhW5fQDHz3EDbZDpzky6zQieTm5YEgsZqg,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=DI4KfntcvCq1Mmb95FU0fURIauLM2eeWj0j8iwM4j8Q,19980
+gym_examples-3.0.52.dist-info/METADATA,sha256=FnVbIftlmP09idXliwYeVtGNu7V4zaKyTKH6OCtfyN8,411
+gym_examples-3.0.52.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.52.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.52.dist-info/RECORD,,
gym_examples-3.0.50.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=5bGZE79MM87Lsi9G4pt9pzYriZjrdSe-u9MppmESRFY,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=S4HYOGOsORx63BvKGPwYhDlRnVITWU6cY1ZN2lZLzDQ,19947
-gym_examples-3.0.50.dist-info/METADATA,sha256=nUkf1NtLaCSfGMpgdwb7IzO_HIVUrbS5EfFsDFCzUtE,411
-gym_examples-3.0.50.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.50.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.50.dist-info/RECORD,,
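The RECORD swap confirms the scope of the release: gym_examples/__init__.py and METADATA change hash but not size (193 and 411 bytes in both wheels, consistent with the version-string bump from 3.0.50 to 3.0.52), wsn_env.py grows from 19947 to 19980 bytes, and envs/__init__.py, WHEEL, and top_level.txt keep identical hashes across the two wheels.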
{gym_examples-3.0.50.dist-info → gym_examples-3.0.52.dist-info}/WHEEL
File without changes
{gym_examples-3.0.50.dist-info → gym_examples-3.0.52.dist-info}/top_level.txt
File without changes