gym-examples 3.0.402__py3-none-any.whl → 3.0.404__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -5,4 +5,4 @@ register(
     entry_point="gym_examples.envs:WSNRoutingEnv",
 )
 
-__version__ = "3.0.402"
+__version__ = "3.0.404"
gym_examples/envs/wsn_env.py CHANGED
@@ -7,6 +7,7 @@ import csv
 from datetime import datetime
 import torch
 import torch.nn as nn
+from gaussian_adaptive_attention import GaussianAdaptiveAttention
 import torch.nn.functional as F
 import os
 from collections import OrderedDict
@@ -28,29 +29,51 @@ latency_per_hop = 1 # latency per hop in seconds
 base_back_up_dir = "results/data/"
 max_reward = 1 # maximum reward value when the sensors sent data to the base station. The opposite value is when the sensors perform an unauthorized action
 
-class Attention(nn.Module):
-    def __init__(self, input_dim, output_dim):
-        super(Attention, self).__init__() # Call the initializer of the parent class (nn.Module)
-        self.input_dim = input_dim # Set the input dimension of the network
-        self.output_dim = output_dim # Set the output dimension of the network
-        # self.linear1 = CustomizedLinear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
-        # self.linear2 = CustomizedLinear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
-        self.linear1 = nn.Linear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
-        self.linear2 = nn.Linear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
-
+# class Attention(nn.Module):
+#     def __init__(self, input_dim, output_dim):
+#         super(Attention, self).__init__() # Call the initializer of the parent class (nn.Module)
+#         self.input_dim = input_dim # Set the input dimension of the network
+#         self.output_dim = output_dim # Set the output dimension of the network
+#         # self.linear1 = CustomizedLinear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
+#         # self.linear2 = CustomizedLinear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
+#         self.linear1 = nn.Linear(input_dim, 64) # Define the first linear layer. It takes input of size 'input_dim' and outputs size '64'
+#         self.linear2 = nn.Linear(64, output_dim) # Define the second linear layer. It takes input of size '64' and outputs size 'output_dim'
+
+
+#     def forward(self, x):
+#         # Step 1: Ensure input is 2D by adding a batch dimension if necessary
+#         if x.dim() == 1:
+#             x = x.unsqueeze(0) # Shape: [1, input_dim]
+#         # x = F.relu(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
+#         x = F.softplus(self.linear1(x)) # Pass the input through a linear layer and a Softplus activation function
+#         attention_weights = F.softmax(x, dim=1) # Apply the softmax function to get the attention weights. Initially F.softmax(x, dim=0)
+#         x = attention_weights * x # Multiply the input by the attention weights
+#         x = self.linear2(x) # Pass the result through another linear layer
+#         return x
+
+# net = Attention(input_dim, output_dim)
+# net = net.double() # Convert the weights to Double
+
+class ScalarAttentionModel(nn.Module):
+    def __init__(self, input_dim):
+        super(ScalarAttentionModel, self).__init__()
+        # Initialize GaussianAdaptiveAttention
+        self.ga_attention = GaussianAdaptiveAttention(
+            num_heads=1, # Single head for simplicity
+            num_gaussians=4, # Corresponds to the input length
+            attention_axis=0, # Attention along the first axis
+        )
+        self.output_layer = nn.Linear(input_dim, 1) # Map to scalar output
 
     def forward(self, x):
-        # Step 1: Ensure input is 2D by adding a batch dimension if necessary
-        if x.dim() == 1:
-            x = x.unsqueeze(0) # Shape: [1, input_dim]
-        # x = F.relu(self.linear1(x)) # Pass the input through a linear layer and a ReLU activation function
-        x = F.softplus(self.linear1(x)) # Pass the input through a linear layer and a Softplus activation function
-        attention_weights = F.softmax(x, dim=1) # Apply the softmax function to get the attention weights. Initially F.softmax(x, dim=0)
-        x = attention_weights * x # Multiply the input by the attention weights
-        x = self.linear2(x) # Pass the result through another linear layer
-        return x
-
-net = Attention(input_dim, output_dim)
+        # Apply GaussianAdaptiveAttention
+        attention_output = self.ga_attention(x.unsqueeze(0)) # Add batch dim
+        attention_output = attention_output.squeeze(0) # Remove batch dim
+        # Map to scalar
+        scalar_output = self.output_layer(attention_output)
+        return scalar_output
+
+net = ScalarAttentionModel(input_dim)
 net = net.double() # Convert the weights to Double
 
 class WSNRoutingEnv(gym.Env):
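
Note: the substantive change in this release is the replacement of the hand-rolled softmax Attention module with a ScalarAttentionModel built on GaussianAdaptiveAttention, imported from the external gaussian_adaptive_attention package, which becomes a new runtime dependency of the wheel. Below is a minimal smoke-test sketch of the new module, not part of the wheel itself; it assumes the package installs under the name gaussian-adaptive-attention (inferred from the import) and that its constructor accepts the keyword arguments shown in the diff. The input length of 4 mirrors the "num_gaussians=4 # Corresponds to the input length" comment above.

# Hypothetical smoke test -- not part of the wheel.
# Assumes: pip install gaussian-adaptive-attention  (package name inferred from the import)
import torch
import torch.nn as nn
from gaussian_adaptive_attention import GaussianAdaptiveAttention

class ScalarAttentionModel(nn.Module):  # mirrors the class added in wsn_env.py
    def __init__(self, input_dim):
        super().__init__()
        self.ga_attention = GaussianAdaptiveAttention(
            num_heads=1, num_gaussians=4, attention_axis=0
        )
        self.output_layer = nn.Linear(input_dim, 1)  # map attended features to a scalar

    def forward(self, x):
        out = self.ga_attention(x.unsqueeze(0)).squeeze(0)  # add, then remove, a batch dim
        return self.output_layer(out)

net = ScalarAttentionModel(input_dim=4).double()  # input_dim=4 is an assumption matching num_gaussians
x = torch.rand(4, dtype=torch.float64)            # one observation vector of length 4
print(net(x))                                     # expect a 1-element tensor if attention preserves shape
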
gym_examples-3.0.404.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.402
+Version: 3.0.404
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.404.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=kSSUOydEpYd6p-FwjBNXhpeRAH5NAZ7lXf2MEHD66RE,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=hLM8UtyVrLn-dm_8ZazOjR_iuBk9mLxzCwRRsIbopWs,27341
+gym_examples-3.0.404.dist-info/METADATA,sha256=vrZnbnEa5pzhM2LTWiayHm5fMGebm-KKeVxMyBG_yuE,412
+gym_examples-3.0.404.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.404.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.404.dist-info/RECORD,,
gym_examples-3.0.402.dist-info/RECORD REMOVED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=QqlM4kYrw8groETIibxHM_PQxmIt-bj9-Na76C4w5Lg,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=4t_GZDrpm_6OJtMK2AE3mz5km5x5mX8yXagdJk2aMC4,26284
-gym_examples-3.0.402.dist-info/METADATA,sha256=bTRc0E0PkHNEIVus7x26qfqXkiV4MtI1t4BWi__87zs,412
-gym_examples-3.0.402.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.402.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.402.dist-info/RECORD,,
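
For reference, each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped and the size is in bytes (per the wheel spec, PEP 427/PEP 376). A sketch of how one might recompute a row from an unpacked wheel; the path used below is illustrative:

# Recompute a wheel RECORD row for one file; run from the root of an unpacked wheel.
import base64, hashlib

def record_row(path: str) -> str:
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Should reproduce the 3.0.404 row above, e.g.:
# gym_examples/envs/wsn_env.py,sha256=hLM8UtyVrLn-dm_8ZazOjR_iuBk9mLxzCwRRsIbopWs,27341
print(record_row("gym_examples/envs/wsn_env.py"))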