gym-examples 2.0.7__py3-none-any.whl → 2.0.9__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "2.0.7"
+__version__ = "2.0.9"
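The `register(...)` call truncated in this hunk is what makes the environment constructible through Gym's factory. A minimal usage sketch, assuming Gym's standard registration API; the id string `"gym_examples/WSNRoutingEnv-v0"` is a hypothetical placeholder, since the actual id lies outside the visible hunk:

```python
import gym
import gym_examples  # importing the package runs the register(...) call above

# NOTE: the real env id is not visible in this hunk;
# "gym_examples/WSNRoutingEnv-v0" is a placeholder.
env = gym.make("gym_examples/WSNRoutingEnv-v0")
obs = env.reset()  # episodes are capped at max_episode_steps=50
```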
gym_examples/envs/wsn_env.py CHANGED
@@ -47,7 +47,7 @@ net = net.double() # Convert the weights to Double
 
 
 class WSNRoutingEnv(gym.Env):
-    def __init__(self, n_sensors = 3, coverage_radius=50):
+    def __init__(self, n_sensors = 30, coverage_radius=50):
 
         super(WSNRoutingEnv, self).__init__()
 
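The only functional change in this hunk is the default fleet size: new episodes now simulate 30 sensors instead of 3. A sketch of pinning the old behaviour explicitly, assuming the class is importable from the module path listed in the RECORD below:

```python
from gym_examples.envs.wsn_env import WSNRoutingEnv

env = WSNRoutingEnv()                                        # 30 sensors as of 2.0.9
env_small = WSNRoutingEnv(n_sensors=3, coverage_radius=50)   # pre-2.0.9 default
```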
@@ -76,101 +76,57 @@ class WSNRoutingEnv(gym.Env):
         self.consumption_energy = np.zeros(self.n_sensors)
         self.number_of_packets = np.ones(self.n_sensors, dtype=int) # Number of packets to transmit
         self.episode_count += 1
-        self.step_count = 0
-        print("\n=====================================")
-        print(f"Episode {self.episode_count} started")
-        print(f"Sensor positions: {self.sensor_positions}")
-        print(f"Distance to base station: {self.distance_to_base}")
-        print(f"Remaining energy: {self.remaining_energy}")
-        print(f"Number of packets: {self.number_of_packets}")
-        print(f"Consumption energy: {self.consumption_energy}")
-        print("=====================================\n")
         return self._get_obs()
 
     def step(self, actions):
-        print(f"Actions: {actions}")
         rewards = [0] * self.n_sensors
         dones = [False] * self.n_sensors
-        print("\n=====================================")
-        print(f"Step {self.step_count + 1} started")
-        print(f"Sensor positions: {self.sensor_positions}")
-        print(f"Distance to base station: {self.distance_to_base}")
-        print(f"Remaining energy: {self.remaining_energy}")
-        print(f"Number of packets: {self.number_of_packets}")
-        print(f"Consumption energy: {self.consumption_energy}")
-        print("=====================================\n")
-        self.step_count += 1
         for i, action in enumerate(actions):
 
            if action not in range(self.n_sensors + 1):
-                print(f"Invalid action: {action} for sensor {i}!")
                raise ValueError("Invalid action!")
 
            if i >= self.n_sensors:
-                print(f"Invalid sensor i: {i}!")
                continue # Skip if the number of actions is greater than the number of sensors
 
            if self.remaining_energy[i] <= 0 or self.number_of_packets[i] <= 0:
-                print(f"Sensor {i} has no energy left or no packets to transmit!")
-                print(f"Remaining energy: {self.remaining_energy[i]}")
-                print(f"Number of packets: {self.number_of_packets[i]}")
                continue # Skip if sensor has no energy left or no packets to transmit
 
            if (action == i):
-                print(f"Sensor {i} tries to transmit data to itself!")
                continue # Skip if sensor tries to transmit data to itself
 
            neighbors_i = self.eligible_receivers(i)
-            print(f"Sensor {i} eligible receivers: {neighbors_i}")
            keys_neighbors_i = list(neighbors_i.keys())
-            print(f"Sensor {i} keys of eligible receivers: {keys_neighbors_i}")
            if len(neighbors_i) == 0 or action not in keys_neighbors_i:
-                print(f"Sensor {i} has no eligible receivers or action {action} is not in the list of eligible receivers!")
                continue
 
            remaining_energy_before = copy.deepcopy(self.remaining_energy)
-            print(f"Remaining energy before transmission: {remaining_energy_before}")
            if action == self.n_sensors:
                rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
-                print(f"Sensor {i} transmits data to the base station")
-                print(f"Sensor {i} rewards: {rewards[i]}")
                dones[i] = True
                # Calculate the energy consumption and remaining for transmitting data to the base station
-                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
-                print(f"Remaining energy after transmission of sensor {i} to base station: {self.remaining_energy}")
-                print(f"Consumption energy after transmission of sensor {i} to base station: {self.consumption_energy}")
+                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
            else:
                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
                self.update_sensor_energies(action, neighbors_i[action]['reception_energy'])
-                print(f"Remaining energy after transmission of sensor {i} to sensor {action}: {self.remaining_energy}")
-                print(f"Consumption energy after transmission of sensor {i} to sensor {action}: {self.consumption_energy}")
                # Update the number of packets of the sensor action
                self.number_of_packets[action] += self.number_of_packets[i]
-                print(f"Number of packets of sensor {action}: {self.number_of_packets[action]}")
                self.distance_to_base[action] = np.linalg.norm(self.sensor_positions[action] - base_station_position)
-                print(f"Distance to base station of sensor {action}: {self.distance_to_base[action]}")
            # Compute individual rewards
            rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
-            print(f"Sensor {i} rewards: {rewards[i]}")
            self.number_of_packets[i] = 0 # Reset the number of packets of the sensor i
-            print(f"Number of packets of sensor {i}: {self.number_of_packets[i]}")
            # Calculate final reward
            # rewards_individual = torch.tensor(rewards[i], dtype=torch.double)
            # final_reward = net(rewards_individual)
            final_reward = sum(rewards[i])
            rewards[i] = final_reward
-            print(f"Final reward of sensor {i}: {rewards[i]}")
 
        for i in range(self.n_sensors):
            if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
-                print(f"Sensor {i} has no energy left or no packets to transmit!")
-                print(f"Remaining energy: {self.remaining_energy[i]}")
-                print(f"Number of packets: {self.number_of_packets[i]}")
                dones[i] = True
 
        # Integrate the mobility of the sensors
        self.integrate_mobility()
-        print(f"Sensor positions after mobility: {self.sensor_positions}")
 
        return self._get_obs(), rewards, dones, {}
 
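The rest of this hunk strips the per-step debug printing; the routing logic itself is unchanged. Reading the surviving control flow: `step()` takes one action per sensor, values `0..n_sensors-1` forward the sensor's packets to another sensor, the value `n_sensors` sends them to the base station, and invalid, self-directed, or infeasible actions are skipped. A minimal driver sketch built only on that visible interface (the random policy and loop bound are illustrative, not part of the package):

```python
import numpy as np
from gym_examples.envs.wsn_env import WSNRoutingEnv

env = WSNRoutingEnv(n_sensors=30, coverage_radius=50)
obs = env.reset()

for _ in range(50):  # matches max_episode_steps=50 from the register() call
    # One action per sensor; the value n_sensors means "transmit to base station".
    actions = np.random.randint(0, env.n_sensors + 1, size=env.n_sensors)
    obs, rewards, dones, info = env.step(actions)  # legacy 4-tuple Gym API
    if all(dones):
        break
```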
{gym_examples-2.0.7.dist-info → gym_examples-2.0.9.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.7
+Version: 2.0.9
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-2.0.9.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=gqSyx1EEuQeAWNhqLrgfA_OQ8AWGNJ0ZHREx2Wvr6kk,192
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=Iq3O6LUZnOwIry3rZRcSkfe0jLfKJnyv5ZL18aDwAvQ,15137
+gym_examples-2.0.9.dist-info/METADATA,sha256=xjdl49NkTN3aMG215C5vqaflQgwwesIAQdUw_xBjRiE,410
+gym_examples-2.0.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.9.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.9.dist-info/RECORD,,
gym_examples-2.0.7.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=S24PMK01kJnGXPbYfpKMTHxTUI-SziAjOgRu5W-S7YY,192
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=_XVGI1ZWnUr0vhgBqV1HdyJzMoilFra_vMM27CnQceE,18341
-gym_examples-2.0.7.dist-info/METADATA,sha256=IcY8_pntZCRrWmjJN8A8WMRGut-0zVXfpD4V01IPpfQ,410
-gym_examples-2.0.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.7.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.7.dist-info/RECORD,,
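The RECORD size column also quantifies the cleanup: gym_examples/envs/wsn_env.py shrinks from 18341 to 15137 bytes, consistent with the removed print statements. Each entry carries a sha256 digest in the standard wheel RECORD encoding (urlsafe base64 with the `=` padding stripped); a sketch for re-deriving one of these values from an unpacked wheel, as a spot check:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    """Return the wheel-RECORD style digest: urlsafe-b64(sha256), '=' stripped."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Run against the unpacked 2.0.9 wheel, this should reproduce the RECORD entry
# "sha256=Iq3O6LUZnOwIry3rZRcSkfe0jLfKJnyv5ZL18aDwAvQ":
print(record_digest("gym_examples/envs/wsn_env.py"))
```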