gym-examples 3.0.752-py3-none-any.whl → 3.0.754-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -5,4 +5,4 @@ register(
     entry_point="gym_examples.envs:WSNRoutingEnv",
 )
 
-__version__ = "3.0.752"
+__version__ = "3.0.754"
gym_examples/envs/wsn_env.py CHANGED
@@ -48,7 +48,7 @@ class ScalarAttentionModel(nn.Module):
         # Apply GaussianAdaptiveAttention
         attention_output = self.ga_attention(x)
         # Map to scalar
-        scalar_output = self.output_layer(attention_output).mean()
+        scalar_output = self.output_layer(attention_output)
         return scalar_output
 
 net = ScalarAttentionModel(input_dim)
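Dropping `.mean()` from `forward` changes what the model returns: the `output_layer` result is now passed through as-is (shape `(batch, 1)` for a scalar head) instead of being collapsed to a 0-d tensor inside the network, leaving any averaging to the caller. A minimal sketch of that shape difference, with a plain `nn.Linear` standing in for the package's `GaussianAdaptiveAttention` block (the stand-in and `input_dim=4` are assumptions for illustration, not the released code):

```python
import torch
import torch.nn as nn

class ScalarAttentionModelSketch(nn.Module):
    """Illustrative stand-in: a linear layer plays the role of the attention block."""
    def __init__(self, input_dim):
        super().__init__()
        self.ga_attention = nn.Linear(input_dim, input_dim)  # placeholder for GaussianAdaptiveAttention
        self.output_layer = nn.Linear(input_dim, 1)           # "map to scalar" head

    def forward(self, x):
        attention_output = self.ga_attention(x)
        return self.output_layer(attention_output)  # no .mean(): shape (batch, 1), not a 0-d tensor

net = ScalarAttentionModelSketch(input_dim=4).double()
print(net(torch.ones(1, 4, dtype=torch.double)).shape)  # torch.Size([1, 1]); with .mean() this was torch.Size([])
```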
@@ -470,10 +470,14 @@ class WSNRoutingEnv(gym.Env):
         '''
         Compute the attention-based rewards
         '''
-        rewards = torch.tensor(rewards, dtype=torch.double)
-        rewards = rewards.unsqueeze(0)  # Add batch dimension
-        final_reward = net(rewards)
-        return final_reward
+        final_reward = []
+        for i in range(len(rewards[0])):
+            rewards_i = [reward[i] for reward in rewards]
+            rewards_i = torch.tensor(rewards_i, dtype=torch.double)
+            rewards_i = rewards_i.unsqueeze(0)  # Add batch dimension
+            final_reward.append(net(rewards_i))
+
+        return final_reward.mean().item()
 
 
     # def compute_attention_reward(self, rewards):
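Where the previous version pushed the whole reward matrix through `net` in a single call, the new version scores each reward component separately: for component `i` it gathers that component across all agents (`[reward[i] for reward in rewards]`), adds a batch dimension, runs it through `net`, and collects the result. As written, though, `final_reward` is a plain Python list, so `final_reward.mean()` raises `AttributeError`. The sketch below is one guess at the intent (stacking the per-component outputs into a tensor before averaging); it assumes `rewards` is a list of equal-length per-agent reward vectors and that `net` accepts a `(1, num_agents)` double tensor:

```python
import torch

def compute_attention_reward_sketch(rewards, net):
    """rewards: list of per-agent reward vectors, e.g. [[r0, r1, ...], [r0, r1, ...]]."""
    per_component = []
    for i in range(len(rewards[0])):
        # Gather component i across all agents and add a batch dimension.
        rewards_i = torch.tensor([reward[i] for reward in rewards], dtype=torch.double)
        # Score this component with the shared attention network.
        per_component.append(net(rewards_i.unsqueeze(0)))
    # Stack into one tensor so .mean() is defined, then return a Python float.
    return torch.stack(per_component).mean().item()
```

An alternative reading would keep the per-component scores and return them as a list; the diff alone does not say which was intended.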
{gym_examples-3.0.752.dist-info → gym_examples-3.0.754.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.752
+Version: 3.0.754
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.754.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=AfgfcIhRLQ13gj-B6lbC4iTEtYaTWCO14JRFDumF1dA,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=I2px13TJyNn93cCQkO22hI5WuIJo_HT3v0ZfBqk7_0k,26856
+gym_examples-3.0.754.dist-info/METADATA,sha256=OrruAdPUHtFXCbUzZNRWkDB8MHpR2zMNqpGNY5ZFHOw,412
+gym_examples-3.0.754.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.754.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.754.dist-info/RECORD,,
gym_examples-3.0.752.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=cSYjJ492gSXyqSOk2FqKhhWRXCx7ZzFuBbf1AuBd038,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=Cb6Og_FyZfyy1xrglch7CdUBsGvFT0QEpyX_cHCc8C4,26684
-gym_examples-3.0.752.dist-info/METADATA,sha256=_ZQEIPcDa066N80eabTuL1dFKRBgRgwa131wsKmcpxU,412
-gym_examples-3.0.752.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.752.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.752.dist-info/RECORD,,