gym-examples 3.0.379__py3-none-any.whl → 3.0.381__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -5,4 +5,4 @@ register(
     entry_point="gym_examples.envs:WSNRoutingEnv",
 )
 
-__version__ = "3.0.379"
+__version__ = "3.0.381"
gym_examples/envs/wsn_env.py CHANGED
@@ -59,7 +59,8 @@ class Attention(nn.Module):
         # Step 1: Ensure input is 2D by adding a batch dimension if necessary
         if x.dim() == 1:
             x = x.unsqueeze(0)  # Shape: [1, input_dim]
-        x = F.relu(self.linear1(x))  # Pass the input through a linear layer and a ReLU activation function
+        # x = F.relu(self.linear1(x))  # Pass the input through a linear layer and a ReLU activation function
+        x = F.Softplus(self.linear1(x))  # Pass the input through a linear layer and a ReLU activation function
         attention_weights = F.softmax(x, dim=1)  # Apply the softmax function to get the attention weights. Initially F.softmax(x, dim=0)
         x = attention_weights * x  # Multiply the input by the attention weights
         x = self.linear2(x)  # Pass the result through another linear layer
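A note on the new activation line: torch.nn.functional spells the functional form in lowercase, F.softplus; F.Softplus does not exist (the module class is nn.Softplus), so the forward pass as shipped raises AttributeError the first time it runs, and the inline comment still says "ReLU". A minimal runnable sketch of the ReLU-to-Softplus swap as presumably intended; the constructor and layer sizes below are assumptions, since the diff shows only the forward body:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Attention(nn.Module):
        def __init__(self, input_dim=4, hidden_dim=8, output_dim=4):  # hypothetical sizes
            super().__init__()
            self.linear1 = nn.Linear(input_dim, hidden_dim)
            self.linear2 = nn.Linear(hidden_dim, output_dim)

        def forward(self, x):
            if x.dim() == 1:
                x = x.unsqueeze(0)                   # Shape: [1, input_dim]
            x = F.softplus(self.linear1(x))          # lowercase functional form, not F.Softplus
            attention_weights = F.softmax(x, dim=1)  # normalize over the feature dimension
            x = attention_weights * x                # weight the activations
            return self.linear2(x)

    print(Attention()(torch.randn(4)).shape)         # torch.Size([1, 4])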
@@ -196,7 +197,6 @@ class WSNRoutingEnv(gym.Env):
             self.packet_latency[i] = 0
 
             rewards[i] = [max_reward] * input_dim  # Reward for transmitting data to the base station
-            print(f"Sensor {i} transmitted data to the base station with modified reward: {self.compute_attention_rewards(rewards[i])}")
             dones[i] = True
         else:
             distance = np.linalg.norm(self.sensor_positions[i] - self.sensor_positions[action])
@@ -223,11 +223,7 @@
             self.packet_latency[i] = 0
 
             rewards[i] = self.compute_individual_rewards(i, action)
-            if self.number_of_steps > 27:
-                raise Error("Stop here")
-            else:
-                print(f"Sensor {i} transmitted data to sensor {action} with modified reward: {self.compute_attention_rewards(rewards[i])}")
-
+
             # Update the number of packets
             self.number_of_packets[action] += self.number_of_packets[i]
 
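Besides dropping debug output, this hunk deletes a guard that was itself broken: Error is not a Python builtin, so once number_of_steps exceeded 27, the raise Error("Stop here") line would have failed with NameError rather than the intended exception. A standalone sketch of the pitfall and the conventional fix (the threshold mirrors the deleted code):

    # The deleted guard's exception class does not exist in Python:
    try:
        raise Error("Stop here")        # NameError: name 'Error' is not defined
    except NameError as exc:
        print(exc)

    # What the guard presumably intended, using a built-in exception class:
    number_of_steps = 30                # stand-in for self.number_of_steps
    if number_of_steps > 27:
        raise RuntimeError("Stop here")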
gym_examples-{3.0.379 → 3.0.381}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.379
+Version: 3.0.381
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.381.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=-xF9v1R8lVyLNVmg8tKb1ZoPyow-1QIZ84QtzXD78rU,166
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=kbGt7hOHZIGso4NIyd0cm8TSbFx0RazDsXN8lAD5c6U,26990
+gym_examples-3.0.381.dist-info/METADATA,sha256=snKPXRfg3uUbz2zqWT1c6-GBVPPh7VDOomY2laV1HRs,412
+gym_examples-3.0.381.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.381.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.381.dist-info/RECORD,,
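For reference, each RECORD row has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 (the RECORD entry itself leaves its hash and size fields empty, since it cannot hash itself). A small sketch of how a row above could be checked against an unpacked copy of the wheel (the local path is illustrative):

    import base64
    import hashlib
    from pathlib import Path

    def record_digest(path: Path) -> str:
        # SHA-256 digest, URL-safe base64 without '=' padding, as written in RECORD
        digest = hashlib.sha256(path.read_bytes()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    path = Path("gym_examples/envs/wsn_env.py")  # relative to the unpacked wheel root
    assert record_digest(path) == "kbGt7hOHZIGso4NIyd0cm8TSbFx0RazDsXN8lAD5c6U"
    assert path.stat().st_size == 26990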
gym_examples-3.0.379.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=-ok5iR5eVxAwucS7NCfeCQPIdhS-25avnp1oxRQR1qg,166
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=3q-Xt1_1zZHatLkKYvkJUPYpHYoysZtolvMODkMKYHg,27263
-gym_examples-3.0.379.dist-info/METADATA,sha256=jgNbjP_koNOXpHUe51r4Xwi0F_nwC9-9WApQF-D4Pv4,412
-gym_examples-3.0.379.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.379.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.379.dist-info/RECORD,,