gym-examples 3.0.405-py3-none-any.whl → 3.0.407-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gym_examples/__init__.py +1 -1
- gym_examples/envs/wsn_env.py +12 -4
- {gym_examples-3.0.405.dist-info → gym_examples-3.0.407.dist-info}/METADATA +1 -1
- gym_examples-3.0.407.dist-info/RECORD +7 -0
- gym_examples-3.0.405.dist-info/RECORD +0 -7
- {gym_examples-3.0.405.dist-info → gym_examples-3.0.407.dist-info}/WHEEL +0 -0
- {gym_examples-3.0.405.dist-info → gym_examples-3.0.407.dist-info}/top_level.txt +0 -0
gym_examples/__init__.py
CHANGED
gym_examples/envs/wsn_env.py
CHANGED
@@ -54,20 +54,28 @@ max_reward = 1 # maximum reward value when the sensors sent data to the base station
 # net = Attention(input_dim, output_dim)
 # net = net.double() # Convert the weights to Double
 
+import torch
+import torch.nn as nn
+from gaussian_adaptive_attention import GaussianAdaptiveAttention # Ensure this is properly imported
+
 class ScalarAttentionModel(nn.Module):
     def __init__(self, input_dim=4, output_dim=1):
         super(ScalarAttentionModel, self).__init__()
         # Initialize GaussianAdaptiveAttention
         self.ga_attention = GaussianAdaptiveAttention(
-
-
+            norm_axis=1, # Normalize along feature axis
+            num_heads=1, # Single head for simplicity
+            num_gaussians=input_dim, # Match input length
+            padding_value=None, # Change if padding exists
         )
         self.output_layer = nn.Linear(input_dim, output_dim) # Map to scalar output
 
     def forward(self, x):
+        # Ensure input is 2D
+        if x.dim() == 1:
+            x = x.unsqueeze(0) # Add batch dimension if necessary
         # Apply GaussianAdaptiveAttention
-        attention_output = self.ga_attention(x
-        attention_output = attention_output.squeeze(0) # Remove batch dim
+        attention_output = self.ga_attention(x)
         # Map to scalar
         scalar_output = self.output_layer(attention_output)
         return scalar_output
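Substantively, this hunk fixes an unbalanced parenthesis in `forward` (`self.ga_attention(x` → `self.ga_attention(x)`), drops the now-unneeded `squeeze(0)`, batches 1-D inputs, and spells out the attention configuration as keyword arguments. Below is a minimal sketch of the resulting model shape, runnable with PyTorch alone; since the exact API of the third-party `gaussian_adaptive_attention` package is not shown in this diff, a hypothetical `_GaussianAttentionStub` stands in for `GaussianAdaptiveAttention`.

```python
# Minimal sketch of the model shape in the diff above; assumes only PyTorch.
# _GaussianAttentionStub is a hypothetical stand-in for the real
# gaussian_adaptive_attention.GaussianAdaptiveAttention module: it reweights
# each feature by a learnable Gaussian profile and is NOT the library's code.
import torch
import torch.nn as nn

class _GaussianAttentionStub(nn.Module):
    def __init__(self, num_gaussians, norm_axis=1):
        super().__init__()
        self.norm_axis = norm_axis
        # One learnable mean/spread pair per feature position.
        self.mu = nn.Parameter(torch.zeros(num_gaussians))
        self.log_sigma = nn.Parameter(torch.zeros(num_gaussians))

    def forward(self, x):
        # Gaussian score per feature, normalized along the feature axis.
        sigma = self.log_sigma.exp()
        scores = torch.exp(-0.5 * ((x - self.mu) / sigma) ** 2)
        weights = scores / scores.sum(dim=self.norm_axis, keepdim=True)
        return x * weights  # Same shape as the input: (batch, num_gaussians).

class ScalarAttentionModel(nn.Module):
    def __init__(self, input_dim=4, output_dim=1):
        super().__init__()
        self.ga_attention = _GaussianAttentionStub(num_gaussians=input_dim)
        self.output_layer = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        if x.dim() == 1:       # Mirror the diff: promote 1-D input to a batch of one.
            x = x.unsqueeze(0)
        attention_output = self.ga_attention(x)
        return self.output_layer(attention_output)  # (batch, 1) scalar output.

model = ScalarAttentionModel(input_dim=4)
print(model(torch.rand(4)))     # 1-D input is batched automatically.
print(model(torch.rand(8, 4)))  # Batched input passes straight through.
```

The stub only preserves the input/output contract ((batch, input_dim) in, (batch, 1) out); for actual use, swap in the real `GaussianAdaptiveAttention` import and keyword arguments shown in the hunk.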
gym_examples-3.0.407.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=ZdyJAbpaBVO5SB4f8ZJVURJt-kN2ZD3Q0LZ4d_vbwTg,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=pJVSjoGOfXumb5E_74ArsFywoh4BqGlV5azKU3qAbIw,27591
+gym_examples-3.0.407.dist-info/METADATA,sha256=T3vwIZ4RAdJZmkdaHZnh6INcIQxA_sBdaZAUuyixNF4,412
+gym_examples-3.0.407.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.407.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.407.dist-info/RECORD,,
gym_examples-3.0.405.dist-info/RECORD
REMOVED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=40E0HKdRnFZJb4sIr2nsfLR9rwnswY_7xpJvqB8eSUU,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=GAZo9ChQOp8kvcBupQ-2iiiQPRvcFyTWsfcepiEdSes,27309
-gym_examples-3.0.405.dist-info/METADATA,sha256=RqAZngR0abiyohGZKSfBaWLCGIz-epMXw7PnYEnv_2g,412
-gym_examples-3.0.405.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.405.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.405.dist-info/RECORD,,
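Each RECORD line above follows the wheel RECORD format: file path, `sha256=` followed by the unpadded URL-safe Base64 digest of the file's contents, and the file size in bytes, which is why `wsn_env.py` growing from 27309 to 27591 bytes changes both of its trailing fields. A small sketch, using only the Python standard library, of how such an entry can be recomputed to verify the hashes:

```python
# Recompute a wheel RECORD entry for a file on disk (standard library only).
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = hashlib.sha256(data).digest()
    # RECORD uses URL-safe Base64 with the trailing '=' padding stripped.
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{len(data)}"

# Example: compare against the RECORD line shipped in the 3.0.407 wheel.
print(record_entry("gym_examples/envs/wsn_env.py"))
```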
{gym_examples-3.0.405.dist-info → gym_examples-3.0.407.dist-info}/WHEEL
File without changes
{gym_examples-3.0.405.dist-info → gym_examples-3.0.407.dist-info}/top_level.txt
File without changes