gym-examples 3.0.402__py3-none-any.whl → 3.0.403__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +44 -21
- {gym_examples-3.0.402.dist-info → gym_examples-3.0.403.dist-info}/METADATA +1 -1
- gym_examples-3.0.403.dist-info/RECORD +7 -0
- gym_examples-3.0.402.dist-info/RECORD +0 -7
- {gym_examples-3.0.402.dist-info → gym_examples-3.0.403.dist-info}/WHEEL +0 -0
- {gym_examples-3.0.402.dist-info → gym_examples-3.0.403.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py
CHANGED
gym_examples/envs/wsn_env.py
CHANGED
@@ -7,6 +7,7 @@ import csv
 from datetime import datetime
 import torch
 import torch.nn as nn
+from gaussian_adaptive_attention import GaussianAdaptiveAttention
 import torch.nn.functional as F
 import os
 from collections import OrderedDict
@@ -28,29 +29,51 @@ latency_per_hop = 1 # latency per hop in seconds
 base_back_up_dir = "results/data/"
 max_reward = 1 # maximum reward value when the sensors sent data to the base station. The opposite value is when the sensors perform an unauthorized action
 
-class Attention(nn.Module):
-    def __init__(self, input_dim, output_dim):
-        super(Attention, self).__init__() # Call the initializer of the parent class (nn.Module)
-        self.input_dim = input_dim # Set the input dimension of the network
-        self.output_dim = output_dim # Set the output dimension of the network
-        # self.linear1 = CustomizedLinear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
-        # self.linear2 = CustomizedLinear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
-        self.linear1 = nn.Linear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
-        self.linear2 = nn.Linear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
-
+# class Attention(nn.Module):
+#     def __init__(self, input_dim, output_dim):
+#         super(Attention, self).__init__() # Call the initializer of the parent class (nn.Module)
+#         self.input_dim = input_dim # Set the input dimension of the network
+#         self.output_dim = output_dim # Set the output dimension of the network
+#         # self.linear1 = CustomizedLinear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
+#         # self.linear2 = CustomizedLinear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
+#         self.linear1 = nn.Linear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
+#         self.linear2 = nn.Linear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
+
+
+#     def forward(self, x):
+#         # Step 1: Ensure input is 2D by adding a batch dimension if necessary
+#         if x.dim() == 1:
+#             x = x.unsqueeze(0) # Shape: [1, input_dim]
+#         # x = F.relu(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
+#         x = F.softplus(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
+#         attention_weights = F.softmax(x, dim=1) # Apply the softmax function to get the attention weights. Initially F.softmax(x, dim=0)
+#         x = attention_weights * x # Multiply the input by the attention weights
+#         x = self.linear2(x) # Pass the result through another linear layer
+#         return x
+
+# net = Attention(input_dim, output_dim)
+# net = net.double() # Convert the weights to Double
+
+class ScalarAttentionModel(nn.Module):
+    def __init__(self, input_dim):
+        super(ScalarAttentionModel, self).__init__()
+        # Initialize GaussianAdaptiveAttention
+        self.ga_attention = GaussianAdaptiveAttention(
+            num_heads=1, # Single head for simplicity
+            num_gaussians=4, # Corresponds to your input length
+            attention_axis=0, # Attention along the first axis
+        )
+        self.output_layer = nn.Linear(input_dim, 1) # Map to scalar output
 
     def forward(self, x):
-        # Step 1: Ensure input is 2D by adding a batch dimension if necessary
-        if x.dim() == 1:
-            x = x.unsqueeze(0) # Shape: [1, input_dim]
-        # x = F.relu(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
-        x = F.softplus(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
-        attention_weights = F.softmax(x, dim=1) # Apply the softmax function to get the attention weights. Initially F.softmax(x, dim=0)
-        x = attention_weights * x # Multiply the input by the attention weights
-        x = self.linear2(x) # Pass the result through another linear layer
-        return x
-
-net = Attention(input_dim, output_dim)
+        # Apply GaussianAdaptiveAttention
+        attention_output = self.ga_attention(x.unsqueeze(0)) # Add batch dim
+        attention_output = attention_output.squeeze(0) # Remove batch dim
+        # Map to scalar
+        scalar_output = self.output_layer(attention_output)
+        return scalar_output
+
+net = ScalarAttentionModel(input_dim)
 net = net.double() # Convert the weights to Double
 
 class WSNRoutingEnv(gym.Env):
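Net effect of the wsn_env.py change: the in-file softmax-attention MLP (Attention) is commented out and replaced by ScalarAttentionModel, which delegates the feature weighting to GaussianAdaptiveAttention from the external gaussian_adaptive_attention package and then maps the attended vector to a single scalar with a linear layer. The sketch below only illustrates that forward path: GaussianAdaptiveAttentionStub is a hypothetical stand-in for the external class (the real constructor arguments are those shown in the diff), and input_dim=4 is an assumption taken from the num_gaussians=4 comment, not from the rest of wsn_env.py.

import torch
import torch.nn as nn

class GaussianAdaptiveAttentionStub(nn.Module):
    # Hypothetical placeholder for gaussian_adaptive_attention.GaussianAdaptiveAttention,
    # kept shape-preserving so the surrounding model can be exercised without the package.
    def forward(self, x):
        weights = torch.softmax(x, dim=-1)  # attention weights over the feature axis
        return weights * x                  # re-weight the input, same shape out

class ScalarAttentionModel(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        self.ga_attention = GaussianAdaptiveAttentionStub()
        self.output_layer = nn.Linear(input_dim, 1)  # map attended features to a scalar

    def forward(self, x):
        attention_output = self.ga_attention(x.unsqueeze(0))  # add batch dim: [1, input_dim]
        attention_output = attention_output.squeeze(0)        # back to [input_dim]
        return self.output_layer(attention_output)            # scalar output, shape [1]

net = ScalarAttentionModel(input_dim=4).double()  # input_dim=4 is an assumption (see lead-in)
print(net(torch.ones(4, dtype=torch.float64)))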
gym_examples-3.0.403.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=uZ4mM6NSjwxePQXM2-RWonNMiovwpzWrHPeC3sFQh2E,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=hLM8UtyVrLn-dm_8ZazOjR_iuBk9mLxzCwRRsIbopWs,27341
+gym_examples-3.0.403.dist-info/METADATA,sha256=2_y7Mzr61A562vvryVnwYkvu6QREaA63Il4UdBG-RGc,412
+gym_examples-3.0.403.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.403.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.403.dist-info/RECORD,,
gym_examples-3.0.402.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=QqlM4kYrw8groETIibxHM_PQxmIt-bj9-Na76C4w5Lg,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=4t_GZDrpm_6OJtMK2AE3mz5km5x5mX8yXagdJk2aMC4,26284
-gym_examples-3.0.402.dist-info/METADATA,sha256=bTRc0E0PkHNEIVus7x26qfqXkiV4MtI1t4BWi__87zs,412
-gym_examples-3.0.402.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.402.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.402.dist-info/RECORD,,
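Each RECORD line has the form path,sha256=<unpadded urlsafe-base64 digest>,<size in bytes>; the 3.0.403 RECORD carries new digests for gym_examples/__init__.py, gym_examples/envs/wsn_env.py and METADATA, while the envs/__init__.py, WHEEL and top_level.txt hashes are unchanged. A small illustrative helper (not part of the package) that reproduces an entry in that format:

import base64
import hashlib
import os

def record_entry(path):
    # Hash the file and format it the way wheel RECORD files do:
    # path,sha256=<unpadded urlsafe-base64 digest>,<size in bytes>
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{os.path.getsize(path)}"

# record_entry("gym_examples/envs/wsn_env.py") run against the 3.0.403 copy
# should reproduce the "...sha256=hLM8UtyVrLn-...,27341" line above.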
{gym_examples-3.0.402.dist-info → gym_examples-3.0.403.dist-info}/WHEEL
File without changes
{gym_examples-3.0.402.dist-info → gym_examples-3.0.403.dist-info}/top_level.txt
File without changes