gym-examples 3.0.80__py3-none-any.whl → 3.0.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "3.0.80"
+__version__ = "3.0.81"
gym_examples/envs/wsn_env.py CHANGED
@@ -91,7 +91,7 @@ class WSNRoutingEnv(gym.Env):
 
     def step(self, actions):
        self.steps += 1
-        rewards = [-1] * self.n_sensors
+        rewards = [0] * self.n_sensors
         dones = [False] * self.n_sensors
         for i, action in enumerate(actions):
             if action not in range(self.n_sensors + 1):
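The only change in step() above is the default entry of the per-sensor reward vector, which moves from -1 to 0. A minimal sketch, not code from the package, of what that shift does to a step's summed reward, assuming (as the hunk suggests, though the rest of step() is not shown here) that sensors whose reward is never overwritten in the loop keep the initial value:

```python
# Hypothetical illustration: 5 sensors, only sensors 0 and 2 receive a
# computed reward this step; the others keep the default entry.
n_sensors = 5
computed = {0: 0.7, 2: 0.4}

rewards_v80 = [-1] * n_sensors   # 3.0.80 default: idle sensors carry a -1 penalty
rewards_v81 = [0] * n_sensors    # 3.0.81 default: idle sensors are neutral
for i, r in computed.items():
    rewards_v80[i] = r
    rewards_v81[i] = r

print(sum(rewards_v80))  # ≈ -1.9  (three idle sensors drag the total down)
print(sum(rewards_v81))  # ≈  1.1  (total reflects only the computed terms)
```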
@@ -225,8 +225,8 @@ class WSNRoutingEnv(gym.Env):
         # Normalize the angle
         normalized_angle = abs(angle) / np.pi
 
-        # return np.clip(1 - normalized_angle, 0, 1)
-        return np.clip(- normalized_angle, -1, 1)
+        return np.clip(1 - normalized_angle, 0, 1)
+        # return np.clip(- normalized_angle, -1, 1)
 
     def compute_reward_distance(self, i, action):
         '''
@@ -239,8 +239,8 @@ class WSNRoutingEnv(gym.Env):
         # Normalize the distance to the next hop
         normalized_distance_to_next_hop = distance / self.coverage_radius
 
-        # return np.clip(1 - normalized_distance_to_next_hop, 0, 1)
-        return np.clip(-normalized_distance_to_next_hop, -1, 1)
+        return np.clip(1 - normalized_distance_to_next_hop, 0, 1)
+        # return np.clip(-normalized_distance_to_next_hop, -1, 1)
 
     def compute_reward_consumption_energy(self, i, action):
         '''
@@ -261,8 +261,8 @@ class WSNRoutingEnv(gym.Env):
         max_total_energy = max_transmission_energy + max_reception_energy
         normalized_total_energy = total_energy / (max_total_energy + self.epsilon)
 
-        # return np.clip(1 - normalized_total_energy, 0, 1)
-        return np.clip(- normalized_total_energy, -1, 1)
+        return np.clip(1 - normalized_total_energy, 0, 1)
+        # return np.clip(- normalized_total_energy, -1, 1)
 
     def compute_reward_dispersion_remaining_energy(self):
         '''
@@ -273,8 +273,8 @@ class WSNRoutingEnv(gym.Env):
         max_dispersion_remaining_energy = initial_energy / 2 # maximum standard deviation of the remaining energy if n_sensors is even
         normalized_dispersion_remaining_energy = dispersion_remaining_energy / (max_dispersion_remaining_energy + self.epsilon)
 
-        # return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
-        return np.clip(- normalized_dispersion_remaining_energy, -1, 1)
+        return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
+        # return np.clip(- normalized_dispersion_remaining_energy, -1, 1)
 
     def compute_reward_number_of_packets(self, action):
         '''
@@ -286,8 +286,8 @@ class WSNRoutingEnv(gym.Env):
         else:
             normalized_number_of_packets = self.number_of_packets[action] / (max_number_of_packets + self.epsilon)
 
-        # return np.clip(1 - normalized_number_of_packets, 0, 1)
-        return np.clip(- normalized_number_of_packets, -1, 1)
+        return np.clip(1 - normalized_number_of_packets, 0, 1)
+        # return np.clip(- normalized_number_of_packets, -1, 1)
 
     def compute_individual_rewards(self, i, action):
         '''
@@ -321,8 +321,8 @@ class WSNRoutingEnv(gym.Env):
         max_dispersion_remaining_energy = initial_energy / 2 # maximum standard deviation of the remaining energy if n_sensors is even
         normalized_dispersion_remaining_energy = dispersion_remaining_energy / (max_dispersion_remaining_energy + self.epsilon)
 
-        # return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
-        return np.clip(- normalized_dispersion_remaining_energy, -1, 1)
+        return np.clip(1 - normalized_dispersion_remaining_energy, 0, 1)
+        # return np.clip(- normalized_dispersion_remaining_energy, -1, 1)
 
     def network_reward_consumption_energy(self):
         '''
@@ -333,8 +333,8 @@ class WSNRoutingEnv(gym.Env):
         max_total_energy = self.n_sensors * initial_energy
         normalized_total_energy = total_energy / (max_total_energy + self.epsilon)
 
-        # return np.clip(1 - normalized_total_energy, 0, 1)
-        return np.clip(- normalized_total_energy, -1, 1)
+        return np.clip(1 - normalized_total_energy, 0, 1)
+        # return np.clip(- normalized_total_energy, -1, 1)
 
     def compute_reward_packet_delivery_ratio(self):
         '''
@@ -351,8 +351,8 @@ class WSNRoutingEnv(gym.Env):
         max_latency = self.n_sensors * self.steps
         normalized_latency = self.total_latency / (max_latency + self.epsilon)
 
-        # return np.clip(1 - normalized_latency, 0, 1)
-        return np.clip(- normalized_latency, -1, 1)
+        return np.clip(1 - normalized_latency, 0, 1)
+        # return np.clip(- normalized_latency, -1, 1)
 
     def compute_reward_network_throughput(self):
         '''
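Every wsn_env.py hunk after step() makes the same swap: the active return becomes np.clip(1 - normalized, 0, 1), and the penalty form np.clip(-normalized, -1, 1) is commented out again. A minimal sketch, not part of the package, of how the two shapes differ for a quantity already normalized to [0, 1]:

```python
import numpy as np

normalized = np.linspace(0.0, 1.0, 5)       # e.g. normalized angle, distance, energy, latency

reward_v81 = np.clip(1 - normalized, 0, 1)  # 3.0.81: bounded positive reward, 1 best, 0 worst
penalty_v80 = np.clip(-normalized, -1, 1)   # 3.0.80: pure penalty, 0 best, -1 worst

print(reward_v81)    # 1.0, 0.75, 0.5, 0.25, 0.0
print(penalty_v80)   # -0.0, -0.25, -0.5, -0.75, -1.0
```

Both forms rank actions identically, since each is monotone decreasing in the normalized value; only the range and sign of the reward change, which matters for agents sensitive to reward scale and for combining terms with the new zero-initialized reward vector in step().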
gym_examples-3.0.81.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 3.0.80
+Version: 3.0.81
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=-tNBdDGkUAe9gRz0t2fK_jAloQqZCns0wnEGIXGdB_s,193
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=onqDtWhDb4uXQtcpWZFILFujrH-AbVZXluV-pgYi3nA,20385
+gym_examples-3.0.81.dist-info/METADATA,sha256=Id-4MFw_RH7WUdIcUlhbAsjDQMX05OIilQgWzKJ1h00,411
+gym_examples-3.0.81.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-3.0.81.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-3.0.81.dist-info/RECORD,,
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=hNQ05dR9XWqWYLWTRWQYrb94dzR8CeLomp79s3ty4pc,193
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=0-eIZjWC5qOwEcK3wJsdP9McFn4gptdQdoiI-I5QOlc,20386
-gym_examples-3.0.80.dist-info/METADATA,sha256=FgRrwMYEQGezDhVqtqUdE9HVpABQZtHSd9f3vXMEFRU,411
-gym_examples-3.0.80.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-3.0.80.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-3.0.80.dist-info/RECORD,,