gym-examples 2.0.6-py3-none-any.whl → 2.0.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
     max_episode_steps=50,
 )
 
-__version__ = "2.0.6"
+__version__ = "2.0.8"
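Aside from the version string, the package root is unchanged, so the bump is directly observable at import time (trivial sketch, assuming the 2.0.8 wheel is installed):

import gym_examples
print(gym_examples.__version__)  # -> "2.0.8" after this upgrade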
gym_examples/envs/wsn_env.py CHANGED
@@ -47,7 +47,7 @@ net = net.double() # Convert the weights to Double
 
 
 class WSNRoutingEnv(gym.Env):
-    def __init__(self, n_sensors = 3, coverage_radius=50):
+    def __init__(self, n_sensors = 10, coverage_radius=50):
 
         super(WSNRoutingEnv, self).__init__()
 
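The only behavioral change in this hunk is the constructor default: WSNRoutingEnv() now builds a ten-sensor network instead of a three-sensor one. A minimal sketch of what that means for callers; the Gym registration id is not part of this diff, so the class is imported directly, and the import path gym_examples.envs.wsn_env is inferred from the RECORD entries below:

# Sketch, not part of the package: instantiating the env directly.
from gym_examples.envs.wsn_env import WSNRoutingEnv

env = WSNRoutingEnv()                # 10 sensors as of 2.0.8
legacy = WSNRoutingEnv(n_sensors=3)  # reproduces the 2.0.6 default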
@@ -77,99 +77,59 @@ class WSNRoutingEnv(gym.Env):
         self.number_of_packets = np.ones(self.n_sensors, dtype=int) # Number of packets to transmit
         self.episode_count += 1
         self.step_count = 0
-        print("\n=====================================")
-        print(f"Episode {self.episode_count} started")
-        print(f"Sensor positions: {self.sensor_positions}")
-        print(f"Distance to base station: {self.distance_to_base}")
-        print(f"Remaining energy: {self.remaining_energy}")
-        print(f"Number of packets: {self.number_of_packets}")
-        print(f"Consumption energy: {self.consumption_energy}")
-        print("=====================================\n")
         return self._get_obs()
 
     def step(self, actions):
+        print(f"Actions: {actions}")
         rewards = [0] * self.n_sensors
         dones = [False] * self.n_sensors
-        print("\n=====================================")
-        print(f"Step {self.step_count + 1} started")
-        print(f"Sensor positions: {self.sensor_positions}")
-        print(f"Distance to base station: {self.distance_to_base}")
-        print(f"Remaining energy: {self.remaining_energy}")
-        print(f"Number of packets: {self.number_of_packets}")
-        print(f"Consumption energy: {self.consumption_energy}")
-        print("=====================================\n")
         self.step_count += 1
         for i, action in enumerate(actions):
 
             if action not in range(self.n_sensors + 1):
-                print(f"Invalid action: {action} for sensor {i}!")
                 raise ValueError("Invalid action!")
 
             if i >= self.n_sensors:
-                print(f"Invalid sensor i: {i}!")
                 continue # Skip if the number of actions is greater than the number of sensors
 
             if self.remaining_energy[i] <= 0 or self.number_of_packets[i] <= 0:
-                print(f"Sensor {i} has no energy left or no packets to transmit!")
-                print(f"Remaining energy: {self.remaining_energy[i]}")
-                print(f"Number of packets: {self.number_of_packets[i]}")
                 continue # Skip if sensor has no energy left or no packets to transmit
 
             if (action == i):
-                print(f"Sensor {i} tries to transmit data to itself!")
                 continue # Skip if sensor tries to transmit data to itself
 
             neighbors_i = self.eligible_receivers(i)
-            print(f"Sensor {i} eligible receivers: {neighbors_i}")
             keys_neighbors_i = list(neighbors_i.keys())
-            print(f"Sensor {i} keys of eligible receivers: {keys_neighbors_i}")
             if len(neighbors_i) == 0 or action not in keys_neighbors_i:
-                print(f"Sensor {i} has no eligible receivers or action {action} is not in the list of eligible receivers!")
                 continue
 
             remaining_energy_before = copy.deepcopy(self.remaining_energy)
-            print(f"Remaining energy before transmission: {remaining_energy_before}")
             if action == self.n_sensors:
                 rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
-                print(f"Sensor {i} transmits data to the base station")
-                print(f"Sensor {i} rewards: {rewards[i]}")
                 dones[i] = True
                 # Calculate the energy consumption and remaining for transmitting data to the base station
-                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
-                print(f"Remaining energy after transmission of sensor {i} to base station: {self.remaining_energy}")
-                print(f"Consumption energy after transmission of sensor {i} to base station: {self.consumption_energy}")
+                self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
             else:
                 self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
                 self.update_sensor_energies(action, neighbors_i[action]['reception_energy'])
-                print(f"Remaining energy after transmission of sensor {i} to sensor {action}: {self.remaining_energy}")
-                print(f"Consumption energy after transmission of sensor {i} to sensor {action}: {self.consumption_energy}")
                 # Update the number of packets of the sensor action
                 self.number_of_packets[action] += self.number_of_packets[i]
-                print(f"Number of packets of sensor {action}: {self.number_of_packets[action]}")
                 self.distance_to_base[action] = np.linalg.norm(self.sensor_positions[action] - base_station_position)
-                print(f"Distance to base station of sensor {action}: {self.distance_to_base[action]}")
                 # Compute individual rewards
                 rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
-                print(f"Sensor {i} rewards: {rewards[i]}")
             self.number_of_packets[i] = 0 # Reset the number of packets of the sensor i
-            print(f"Number of packets of sensor {i}: {self.number_of_packets[i]}")
             # Calculate final reward
             # rewards_individual = torch.tensor(rewards[i], dtype=torch.double)
             # final_reward = net(rewards_individual)
             final_reward = sum(rewards[i])
             rewards[i] = final_reward
-            print(f"Final reward of sensor {i}: {rewards[i]}")
 
         for i in range(self.n_sensors):
             if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
-                print(f"Sensor {i} has no energy left or no packets to transmit!")
-                print(f"Remaining energy: {self.remaining_energy[i]}")
-                print(f"Number of packets: {self.number_of_packets[i]}")
                 dones[i] = True
 
         # Integrate the mobility of the sensors
-        self.integrate_mobility()
-        print(f"Sensor positions after mobility: {self.sensor_positions}")
+        # self.integrate_mobility()
 
         return self._get_obs(), rewards, dones, {}
 
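Net effect of this hunk: the per-episode and per-step debug dumps are removed (a single print of the incoming actions is added instead), the energy update in the base-station branch is collapsed to one call, and integrate_mobility() is commented out, so sensor positions stay fixed in 2.0.8. The step() contract visible here takes one action per sensor, each in range(n_sensors + 1) with action == n_sensors meaning "transmit to the base station", and returns (obs, rewards, dones, {}). A driver-loop sketch against only what this hunk shows (reset() is assumed to follow the standard gym.Env API):

# Sketch: driving WSNRoutingEnv with random, always-valid actions.
import random
from gym_examples.envs.wsn_env import WSNRoutingEnv  # path inferred from RECORD

env = WSNRoutingEnv(n_sensors=10)
obs = env.reset()
for _ in range(50):  # matches max_episode_steps=50 in register()
    # One action per sensor; index n_sensors addresses the base station.
    actions = [random.randrange(env.n_sensors + 1) for _ in range(env.n_sensors)]
    obs, rewards, dones, info = env.step(actions)
    if all(dones):
        break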
{gym_examples-2.0.6.dist-info → gym_examples-2.0.8.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gym-examples
-Version: 2.0.6
+Version: 2.0.8
 Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
 Home-page: https://github.com/gedji/CODES.git
 Author: Georges Djimefo
gym_examples-2.0.8.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+gym_examples/__init__.py,sha256=fIbIs37EHyLkQTZ5Ms2Qa1NGVpG3opspoUbt_BgWriY,192
+gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+gym_examples/envs/wsn_env.py,sha256=cpqNN2fOEBE-_w-E4stAPH1VLsxPlIGtoZVV3dHmvhU,15236
+gym_examples-2.0.8.dist-info/METADATA,sha256=XlQxAfb-C0awmiUAIbEmsli6nSJPMWGZiqJYl1y_UUk,410
+gym_examples-2.0.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+gym_examples-2.0.8.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+gym_examples-2.0.8.dist-info/RECORD,,
gym_examples-2.0.6.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-gym_examples/__init__.py,sha256=OAOJG5LVjjNoWlTd7QFD05jF7mgEZRfiwyMoXfOwgDI,192
-gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
-gym_examples/envs/wsn_env.py,sha256=c6hVNNjnShx3JCPvO_Dym90-kuAMP2i1LYXyYwyHDCI,18303
-gym_examples-2.0.6.dist-info/METADATA,sha256=3daeoe2bbui0jynh2lB_qv3veL9cVpV3YDxHBtWUFXA,410
-gym_examples-2.0.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-gym_examples-2.0.6.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
-gym_examples-2.0.6.dist-info/RECORD,,
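The RECORD churn is the mechanical consequence of the edits above: wsn_env.py shrinks from 18303 to 15236 bytes as the debug prints come out, __init__.py keeps its 192-byte size (only the version digits changed), and the dist-info paths move from 2.0.6 to 2.0.8. RECORD digests follow the wheel convention (PEP 376/427): "sha256=" plus the padding-stripped urlsafe base64 of the file's SHA-256. A small sketch for re-checking an installed file against these entries:

# Sketch: recompute a RECORD-style digest and compare against the lines above.
import base64
import hashlib

def record_digest(path):
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# record_digest(".../gym_examples/envs/wsn_env.py") should print the 2.0.8 entry:
# sha256=cpqNN2fOEBE-_w-E4stAPH1VLsxPlIGtoZVV3dHmvhU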