gym-examples 3.0.50__py3-none-any.whl → 3.0.51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.50"
+__version__ = "3.0.51"
gym_examples/envs/wsn_env.py CHANGED
@@ -10,7 +10,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 # Define the network parameters for the final reward function
-input_dim = 4 # length of the individual rewards vector
+# input_dim = 4 # length of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
@@ -121,8 +121,8 @@ class WSNRoutingEnv(gym.Env):
             self.total_latency += self.packet_latency[i] + latency_per_hop
             self.packet_latency[i] = 0
 
-            # rewards[i] = self.compute_individual_rewards(i, action)
-            rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
+            rewards[i] = self.compute_individual_rewards(i, action)
+            # rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
             dones[i] = True
         else:
             distance = np.linalg.norm(self.sensor_positions[i] - self.sensor_positions[action])
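
The substantive change in this hunk is the reward source: the flat placeholder vector is swapped back for per-node rewards. A minimal sketch of the difference, assuming NumPy and using an invented stand-in for compute_individual_rewards (the real method lives in WSNRoutingEnv and is not shown in this diff):

import numpy as np

def compute_individual_rewards_stub(i, action):
    # Hypothetical stand-in: returns one value per reward criterion
    # (e.g. energy, latency, distance), so the length is not fixed.
    return np.array([0.8, 0.3, 0.5])

old_reward = np.ones(4)                             # old: flat vector, hardcoded length 4
new_reward = compute_individual_rewards_stub(0, 2)  # new: length set by the reward criteria

This is why the module-level input_dim = 4 could be commented out above: the length of the rewards vector is now a property of the environment, not a constant.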
@@ -363,8 +363,10 @@ class WSNRoutingEnv(gym.Env):
         '''
         Compute the attention-based rewards
         '''
+        input_dim = len(rewards)
         rewards = torch.tensor(rewards, dtype=torch.double)
-        net = Attention(len(rewards), output_dim)
+        print(f"input_dim_modified: {len(rewards)}")
+        net = Attention(input_dim, output_dim)
         net = net.double() # Convert the weights to Double
         final_reward = net(rewards)
         return final_reward
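
This hunk sizes the attention network from the rewards vector itself rather than the old module-level input_dim. The Attention class is not part of this diff; below is a minimal sketch of a module compatible with the call pattern above, assuming a softmax-weighted linear combination (the scorer and out layer names are invented for illustration and may differ from the real class in wsn_env.py):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    # Hypothetical sketch; the actual definition in wsn_env.py may differ.
    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.scorer = nn.Linear(input_dim, input_dim)  # one score per reward component
        self.out = nn.Linear(input_dim, output_dim)    # collapse to the final scalar reward

    def forward(self, rewards):
        weights = F.softmax(self.scorer(rewards), dim=-1)  # attention weights over components
        return self.out(weights * rewards)                 # weighted combination

rewards = torch.tensor([0.8, 0.3, 0.5], dtype=torch.double)
net = Attention(len(rewards), 1).double()  # mirrors net = net.double() in the hunk
final_reward = net(rewards)

Building the network per call, as the hunk does, means its weights are freshly initialized for every rewards vector; sizing it with len(rewards) at least keeps the construction consistent with variable-length inputs.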
{gym_examples-3.0.50.dist-info → gym_examples-3.0.51.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.50
+Version: 3.0.51
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.51.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=Mw543YXe6ddweCkbMiBJIu5taTJQMEm-gNIu3Omp8e8,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=DuacU7Dou0YVfINsYsR7NorKUgchUdz7Px5-rwlKFAM,20034
+gym_examples-3.0.51.dist-info/METADATA,sha256=vopRCS5_QmK3Gc2le27GqNMnovSG4lZtGoeWXFyyy1M,411
+gym_examples-3.0.51.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.51.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.51.dist-info/RECORD,,
gym_examples-3.0.50.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=5bGZE79MM87Lsi9G4pt9pzYriZjrdSe-u9MppmESRFY,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=S4HYOGOsORx63BvKGPwYhDlRnVITWU6cY1ZN2lZLzDQ,19947
-gym_examples-3.0.50.dist-info/METADATA,sha256=nUkf1NtLaCSfGMpgdwb7IzO_HIVUrbS5EfFsDFCzUtE,411
-gym_examples-3.0.50.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.50.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.50.dist-info/RECORD,,