gym-examples 3.0.224__py3-none-any.whl → 3.0.226__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.224"
+__version__ = "3.0.226"
gym_examples/envs/wsn_env.py CHANGED
@@ -11,7 +11,7 @@ import torch.nn.functional as F
 import os
 
 # Define the network parameters for the final reward function
-input_dim = 2 # length of the individual rewards vector
+input_dim = 7 # length of the individual rewards vector
 output_dim = 1 # final reward
 
 Eelec = 50e-9 # energy consumption per bit in joules
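
The bump from input_dim = 2 to 7 keeps the final-reward network's input size in line with the reward vector restored in the @@ -391 hunk below: the three performance terms are visible there, while a 4-component rewards_energy is an inference from the arithmetic, not something this diff shows. A minimal sketch of that shape contract, under those assumptions:

import numpy as np

# rewards_performance has the 3 components listed in the @@ -391 hunk;
# a 4-component rewards_energy is an assumption derived from
# input_dim = 7 minus those 3 performance terms.
rewards_energy = np.zeros(4)       # assumed: energy-related terms (not shown in this diff)
rewards_performance = np.zeros(3)  # latency, network throughput, packet delivery ratio

individual_rewards = np.concatenate((rewards_energy, rewards_performance))
assert individual_rewards.shape[0] == 7  # must match input_dim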
@@ -166,8 +166,8 @@ class WSNRoutingEnv(gym.Env):
                 self.packet_latency[i] = 0
 
                 # rewards[i] = self.compute_individual_rewards(i, action)
-                # rewards[i] = np.ones(input_dim) * max_reward # Reward for transmitting data to the base station
-                rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
+                rewards[i] = np.ones(input_dim) * max_reward # Reward for transmitting data to the base station
+                # rewards[i] = np.ones(input_dim) # Reward for transmitting data to the base station
                 dones[i] = True
             else:
                 distance = np.linalg.norm(self.sensor_positions[i] - self.sensor_positions[action])
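
This hunk only swaps which of two commented alternatives is live: the reward for delivering a packet to the base station is again scaled by max_reward rather than being a plain all-ones vector. A sketch of the re-enabled variant, with an illustrative value for max_reward, whose definition is not part of this diff:

import numpy as np

input_dim = 7     # as set at the top of wsn_env.py
max_reward = 1.0  # assumed scalar; defined elsewhere in wsn_env.py

def delivery_reward():
    # Reward for transmitting data to the base station (re-enabled variant).
    # Note the two variants only differ numerically when max_reward != 1.
    return np.ones(input_dim) * max_reward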
@@ -391,8 +391,8 @@ class WSNRoutingEnv(gym.Env):
 
         rewards_performance = np.array([reward_latency, reward_network_throughput, reward_packet_delivery_ratio])
 
-        # return np.concatenate((rewards_energy, rewards_performance))
-        return np.array([reward_consumption_energy, reward_dispersion_remaining_energy])
+        return np.concatenate((rewards_energy, rewards_performance))
+        # return np.array([reward_consumption_energy, reward_dispersion_remaining_energy])
 
 
     def compute_network_rewards(self):
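
This restored return value is what makes input_dim = 7 necessary: the method again yields the energy and performance terms concatenated, presumably so the torch-based final-reward network parameterized at the top of the file (input_dim in, output_dim = 1 out) can map them to a scalar. A hypothetical sketch of that wiring; the actual network architecture is not shown in this diff, and the Linear layer below is only an illustration:

import numpy as np
import torch
import torch.nn as nn

input_dim, output_dim = 7, 1                         # from the top of wsn_env.py
final_reward_net = nn.Linear(input_dim, output_dim)  # architecture assumed

individual_rewards = np.random.rand(input_dim)       # stand-in for compute_individual_rewards(i, action)
x = torch.as_tensor(individual_rewards, dtype=torch.float32)
final_reward = final_reward_net(x)                   # one scalar final reward per agent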
{gym_examples-3.0.224.dist-info → gym_examples-3.0.226.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.224
+Version: 3.0.226
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-3.0.226.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=zblH-5_lZeAcLQ1-3_KytHVwJby79q4zDmuRKoU_oLg,194
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=ZozBJYjlKTdIozRpK-hEvHT4MSAQcwFKgb4NFTwpDIA,26381
+gym_examples-3.0.226.dist-info/METADATA,sha256=j2HVG5AGoZNI5RyiGrw1s_5VKdeFSIT87BYttXiqbA4,412
+gym_examples-3.0.226.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.226.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.226.dist-info/RECORD,,
gym_examples-3.0.224.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=O3Do7Y6dV9a8VGxnVg0jOzmDPuyv0ISI_7DQcCG8Ihs,194
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=ykllANFl8ORgtX7T9HgcIgsnsAW-2bH0wxk-5tWu0Ng,26381
-gym_examples-3.0.224.dist-info/METADATA,sha256=2TY7rpy6ZJpqn7-jkVnfLsPekbtOoTmJdk1miX5Ckk8,412
-gym_examples-3.0.224.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.224.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.224.dist-info/RECORD,,