gym-examples 2.0.2__py3-none-any.whl → 2.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )

-__version__ = "2.0.2"
+__version__ = "2.0.3"
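
The only code change in `__init__.py` is the version string. A quick post-upgrade check (a sketch, assuming the new wheel is installed in the current environment):

    import gym_examples

    # The package exposes its version directly, as shown in the hunk above.
    assert gym_examples.__version__ == "2.0.3"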
gym_examples/envs/wsn_env.py CHANGED
@@ -76,62 +76,100 @@ class WSNRoutingEnv(gym.Env):
         self.consumption_energy = np.zeros(self.n_sensors)
         self.number_of_packets = np.ones(self.n_sensors, dtype=int) # Number of packets to transmit
         self.episode_count += 1
-
+        self.step_count = 0
+        print("\n=====================================")
+        print(f"Episode {self.episode_count} started")
+        print(f"Sensor positions: {self.sensor_positions}")
+        print(f"Distance to base station: {self.distance_to_base}")
+        print(f"Remaining energy: {self.remaining_energy}")
+        print(f"Number of packets: {self.number_of_packets}")
+        print(f"Consumption energy: {self.consumption_energy}")
+        print("=====================================\n")
         return self._get_obs()

     def step(self, actions):
         rewards = [0] * self.n_sensors
         dones = [False] * self.n_sensors
+        print("\n=====================================")
+        print(f"Step {self.step_count + 1} started")
+        print(f"Sensor positions: {self.sensor_positions}")
+        print(f"Distance to base station: {self.distance_to_base}")
+        print(f"Remaining energy: {self.remaining_energy}")
+        print(f"Number of packets: {self.number_of_packets}")
+        print(f"Consumption energy: {self.consumption_energy}")
+        print("=====================================\n")
+        self.step_count += 1
         for i, action in enumerate(actions):

             if action not in range(self.n_sensors + 1):
+                print(f"Invalid action: {action} for sensor {i}!")
                 raise ValueError("Invalid action!")

             if i >= self.n_sensors:
+                print(f"Invalid sensor i: {i}!")
                 continue # Skip if the number of actions is greater than the number of sensors

             if self.remaining_energy[i] <= 0 or self.number_of_packets[i] <= 0:
+                print(f"Sensor {i} has no energy left or no packets to transmit!")
+                print(f"Remaining energy: {self.remaining_energy[i]}")
+                print(f"Number of packets: {self.number_of_packets[i]}")
                 continue # Skip if sensor has no energy left or no packets to transmit

             if (action == i):
+                print(f"Sensor {i} tries to transmit data to itself!")
                 continue # Skip if sensor tries to transmit data to itself

             neighbors_i = self.eligible_receivers(i)
+            print(f"Sensor {i} eligible receivers: {neighbors_i}")
             keys_neighbors_i = list(neighbors_i.keys())
+            print(f"Sensor {i} keys of eligible receivers: {keys_neighbors_i}")
             if len(neighbors_i) == 0 or action not in keys_neighbors_i:
+                print(f"Sensor {i} has no eligible receivers or action {action} is not in the list of eligible receivers!")
                 continue

             remaining_energy_before = copy.deepcopy(self.remaining_energy)
+            print(f"Remaining energy before transmission: {remaining_energy_before}")
             if action == self.n_sensors:
                 rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
+                print(f"Sensor {i} transmits data to the base station")
+                print(f"Sensor {i} rewards: {rewards[i]}")
                 dones[i] = True
                 # Calculate the energy consumption and remaining for transmitting data to the base station
-                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
+                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
+                print(f"Remaining energy after transmission of sensor {i} to base station: {self.remaining_energy}")
+                print(f"Consumption energy after transmission of sensor {i} to base station: {self.consumption_energy}")
             else:
                 self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
-                self.update_sensor_energies(action, neighbors_i[action]['reception_energy'])
+                self.update_sensor_energies(action, neighbors_i[action]['reception_energy'])
+                print(f"Remaining energy after transmission of sensor {i} to sensor {action}: {self.remaining_energy}")
+                print(f"Consumption energy after transmission of sensor {i} to sensor {action}: {self.consumption_energy}")
                 # Update the number of packets of the sensor action
                 self.number_of_packets[action] += self.number_of_packets[i]
-
+                print(f"Number of packets of sensor {action}: {self.number_of_packets[action]}")
                 self.distance_to_base[action] = np.linalg.norm(self.sensor_positions[action] - base_station_position)
-
+                print(f"Distance to base station of sensor {action}: {self.distance_to_base[action]}")
                 # Compute individual rewards
                 rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
-
+                print(f"Sensor {i} rewards: {rewards[i]}")
             self.number_of_packets[i] = 0 # Reset the number of packets of the sensor i
-
+            print(f"Number of packets of sensor {i}: {self.number_of_packets[i]}")
             # Calculate final reward
             # rewards_individual = torch.tensor(rewards[i], dtype=torch.double)
             # final_reward = net(rewards_individual)
             final_reward = sum(rewards[i])
             rewards[i] = final_reward
+            print(f"Final reward of sensor {i}: {rewards[i]}")

         for i in range(self.n_sensors):
             if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
+                print(f"Sensor {i} has no energy left or no packets to transmit!")
+                print(f"Remaining energy: {self.remaining_energy[i]}")
+                print(f"Number of packets: {self.number_of_packets[i]}")
                 dones[i] = True

         # Integrate the mobility of the sensors
         self.integrate_mobility()
+        print(f"Sensor positions after mobility: {self.sensor_positions}")

         return self._get_obs(), rewards, dones, {}

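The substance of 2.0.3 is debug instrumentation: a new `step_count` counter plus `print` tracing of positions, energies, and packet counts at every decision point in `reset` and `step`. Since unconditional prints are costly inside a training loop, one alternative is routing the trace through the standard `logging` module so it can be switched off without editing the environment. A minimal sketch, using the attribute names from the diff (the helper itself is hypothetical, not part of the package):

    import logging

    logger = logging.getLogger("wsn_env")

    def log_step_state(env):
        # Hypothetical stand-in for the print tracing added in 2.0.3;
        # emits only when the logger is configured at DEBUG level.
        logger.debug("Step %d started", env.step_count + 1)
        logger.debug("Sensor positions: %s", env.sensor_positions)
        logger.debug("Remaining energy: %s", env.remaining_energy)
        logger.debug("Number of packets: %s", env.number_of_packets)

    # Enable the trace only when needed:
    # logging.basicConfig(level=logging.DEBUG)
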
gym_examples-2.0.2.dist-info/METADATA → gym_examples-2.0.3.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.2
+Version: 2.0.3
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
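
The METADATA file mirrors the version bump. For reference, an installed distribution's metadata can be read through the standard library (a sketch; requires Python 3.8+):

    from importlib.metadata import metadata, version

    # The distribution name is the hyphenated Name field from METADATA.
    print(version("gym-examples"))             # expected: 2.0.3
    print(metadata("gym-examples")["Summary"])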
gym_examples-2.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=NDVMqs-BmbAaaVEkGhg9t4AoaV_Jk0MRnorBIpJjRdA,192
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=b1wYD9zifgnfmVYmuwMtCxfNdvwQbVcQHzYLTFi5bWo,18304
+gym_examples-2.0.3.dist-info/METADATA,sha256=heuSoilS5UuI7OqwKDBgQmGC6h14FOUmHG2kICOSXuk,410
+gym_examples-2.0.3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.3.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.3.dist-info/RECORD,,
gym_examples-2.0.2.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=yUl0F9Een62k4ZsMW38Vh0WMrPjV1oLtPMCo1OECKqw,192
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=S5-qD11zbxxgoxeqpEKz7jTh1dM5xCrA8bGYq6KkVok,15206
-gym_examples-2.0.2.dist-info/METADATA,sha256=mntNXLKTgn1aej4y3rXoA6-ytl5xzD-tW2l00FRkCFw,410
-gym_examples-2.0.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.2.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.2.dist-info/RECORD,,
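
Each RECORD entry has the form `path,sha256=<digest>,<size>`, where the digest is the file's SHA-256 in unpadded URL-safe base64 (the standard wheel RECORD format, per PEP 376/427). A small sketch for recomputing an entry's digest, e.g. to confirm the `wsn_env.py` hash above (the install path is illustrative):

    import base64
    import hashlib

    def record_digest(path):
        # SHA-256 digest in the RECORD encoding: URL-safe base64
        # with the trailing '=' padding stripped.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # e.g. record_digest("gym_examples/envs/wsn_env.py")
    # should return "b1wYD9zifgnfmVYmuwMtCxfNdvwQbVcQHzYLTFi5bWo"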