gym-examples 2.0.7__py3-none-any.whl → 2.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gym_examples/__init__.py CHANGED
@@ -6,4 +6,4 @@ register(
  max_episode_steps=50,
  )

- __version__ = "2.0.7"
+ __version__ = "2.0.8"
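Beyond the register(...) call (which keeps max_episode_steps=50), this file only carries the package version string, so the bump is observable directly from an installed copy. A minimal check, assuming the 2.0.8 wheel above is what is installed:

# Hedged sketch: __version__ is the attribute shown in the diff above.
import gym_examples

print(gym_examples.__version__)  # expected output for this wheel: 2.0.8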
gym_examples/envs/wsn_env.py CHANGED
@@ -47,7 +47,7 @@ net = net.double() # Convert the weights to Double

  class WSNRoutingEnv(gym.Env):
- def __init__(self, n_sensors = 3, coverage_radius=50):
+ def __init__(self, n_sensors = 10, coverage_radius=50):

  super(WSNRoutingEnv, self).__init__()
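The only functional change in this hunk is the constructor default: WSNRoutingEnv now starts with 10 sensors instead of 3 unless n_sensors is passed explicitly. A minimal sketch of both call forms, assuming the class is importable from gym_examples.envs.wsn_env (the module path listed in the RECORD further down):

# Hedged sketch: the constructor signature comes from the diff above;
# the import path is inferred from the wheel's RECORD and may differ.
from gym_examples.envs.wsn_env import WSNRoutingEnv

env = WSNRoutingEnv()                                        # 2.0.8 default: 10 sensors
env_small = WSNRoutingEnv(n_sensors=3, coverage_radius=50)   # reproduces the 2.0.7 default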
@@ -77,100 +77,59 @@ class WSNRoutingEnv(gym.Env):
  self.number_of_packets = np.ones(self.n_sensors, dtype=int) # Number of packets to transmit
  self.episode_count += 1
  self.step_count = 0
- print("\n=====================================")
- print(f"Episode {self.episode_count} started")
- print(f"Sensor positions: {self.sensor_positions}")
- print(f"Distance to base station: {self.distance_to_base}")
- print(f"Remaining energy: {self.remaining_energy}")
- print(f"Number of packets: {self.number_of_packets}")
- print(f"Consumption energy: {self.consumption_energy}")
- print("=====================================\n")
  return self._get_obs()

  def step(self, actions):
  print(f"Actions: {actions}")
  rewards = [0] * self.n_sensors
  dones = [False] * self.n_sensors
- print("\n=====================================")
- print(f"Step {self.step_count + 1} started")
- print(f"Sensor positions: {self.sensor_positions}")
- print(f"Distance to base station: {self.distance_to_base}")
- print(f"Remaining energy: {self.remaining_energy}")
- print(f"Number of packets: {self.number_of_packets}")
- print(f"Consumption energy: {self.consumption_energy}")
- print("=====================================\n")
  self.step_count += 1
  for i, action in enumerate(actions):

  if action not in range(self.n_sensors + 1):
- print(f"Invalid action: {action} for sensor {i}!")
  raise ValueError("Invalid action!")

  if i >= self.n_sensors:
- print(f"Invalid sensor i: {i}!")
  continue # Skip if the number of actions is greater than the number of sensors

  if self.remaining_energy[i] <= 0 or self.number_of_packets[i] <= 0:
- print(f"Sensor {i} has no energy left or no packets to transmit!")
- print(f"Remaining energy: {self.remaining_energy[i]}")
- print(f"Number of packets: {self.number_of_packets[i]}")
  continue # Skip if sensor has no energy left or no packets to transmit

  if (action == i):
- print(f"Sensor {i} tries to transmit data to itself!")
  continue # Skip if sensor tries to transmit data to itself

  neighbors_i = self.eligible_receivers(i)
- print(f"Sensor {i} eligible receivers: {neighbors_i}")
  keys_neighbors_i = list(neighbors_i.keys())
- print(f"Sensor {i} keys of eligible receivers: {keys_neighbors_i}")
  if len(neighbors_i) == 0 or action not in keys_neighbors_i:
- print(f"Sensor {i} has no eligible receivers or action {action} is not in the list of eligible receivers!")
  continue

  remaining_energy_before = copy.deepcopy(self.remaining_energy)
- print(f"Remaining energy before transmission: {remaining_energy_before}")
  if action == self.n_sensors:
  rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
- print(f"Sensor {i} transmits data to the base station")
- print(f"Sensor {i} rewards: {rewards[i]}")
  dones[i] = True
  # Calculate the energy consumption and remaining for transmitting data to the base station
- self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
- print(f"Remaining energy after transmission of sensor {i} to base station: {self.remaining_energy}")
- print(f"Consumption energy after transmission of sensor {i} to base station: {self.consumption_energy}")
+ self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
  else:
  self.update_sensor_energies(i, neighbors_i[action]['transmission_energy'])
  self.update_sensor_energies(action, neighbors_i[action]['reception_energy'])
- print(f"Remaining energy after transmission of sensor {i} to sensor {action}: {self.remaining_energy}")
- print(f"Consumption energy after transmission of sensor {i} to sensor {action}: {self.consumption_energy}")
  # Update the number of packets of the sensor action
  self.number_of_packets[action] += self.number_of_packets[i]
- print(f"Number of packets of sensor {action}: {self.number_of_packets[action]}")
  self.distance_to_base[action] = np.linalg.norm(self.sensor_positions[action] - base_station_position)
- print(f"Distance to base station of sensor {action}: {self.distance_to_base[action]}")
  # Compute individual rewards
  rewards[i] = self.compute_individual_rewards(i, action, neighbors_i, remaining_energy_before)
- print(f"Sensor {i} rewards: {rewards[i]}")
  self.number_of_packets[i] = 0 # Reset the number of packets of the sensor i
- print(f"Number of packets of sensor {i}: {self.number_of_packets[i]}")
  # Calculate final reward
  # rewards_individual = torch.tensor(rewards[i], dtype=torch.double)
  # final_reward = net(rewards_individual)
  final_reward = sum(rewards[i])
  rewards[i] = final_reward
- print(f"Final reward of sensor {i}: {rewards[i]}")

  for i in range(self.n_sensors):
  if (self.remaining_energy[i] <= 0) or (self.number_of_packets[i] <= 0):
- print(f"Sensor {i} has no energy left or no packets to transmit!")
- print(f"Remaining energy: {self.remaining_energy[i]}")
- print(f"Number of packets: {self.number_of_packets[i]}")
  dones[i] = True

  # Integrate the mobility of the sensors
- self.integrate_mobility()
- print(f"Sensor positions after mobility: {self.sensor_positions}")
+ # self.integrate_mobility()

  return self._get_obs(), rewards, dones, {}
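This hunk strips the per-episode and per-step debug printing and comments out the integrate_mobility() call; the control flow of step() is otherwise unchanged: one action per sensor, the value n_sensors addresses the base station, out-of-range actions raise ValueError, and the method returns per-sensor rewards and dones plus an empty info dict. A rough interaction sketch built only from what is visible in this diff (a reset-style method ending in return self._get_obs(), the old four-tuple step API); anything beyond that, including the import path, is an assumption:

# Hedged sketch of a single environment step, based on the step() logic above.
import numpy as np
from gym_examples.envs.wsn_env import WSNRoutingEnv  # path inferred from the RECORD

env = WSNRoutingEnv(n_sensors=10)
obs = env.reset()  # assumed to return the initial observation via _get_obs()

# One action per sensor: 0..n_sensors-1 forwards to that sensor,
# n_sensors (here 10) transmits straight to the base station.
actions = np.random.randint(0, env.n_sensors + 1, size=env.n_sensors)
obs, rewards, dones, info = env.step(actions)

print(rewards)  # one (possibly zero) reward per sensor
print(dones)    # one done flag per sensor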
gym_examples-{2.0.7 → 2.0.8}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gym-examples
- Version: 2.0.7
+ Version: 2.0.8
  Summary: A custom environment for multi-agent reinforcement learning focused on WSN routing.
  Home-page: https://github.com/gedji/CODES.git
  Author: Georges Djimefo
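Only the Version field changes here; the name, summary, home page, and author carry over unchanged. Assuming the distribution is installed under the name shown, the same fields can be read back at runtime with importlib.metadata:

# Hedged sketch: reads back the metadata fields shown in the hunk above.
from importlib.metadata import metadata, version

print(version("gym-examples"))  # expected: 2.0.8
meta = metadata("gym-examples")
print(meta["Summary"])          # the WSN routing summary above
print(meta["Home-page"])        # https://github.com/gedji/CODES.git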
gym_examples-2.0.8.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ gym_examples/__init__.py,sha256=fIbIs37EHyLkQTZ5Ms2Qa1NGVpG3opspoUbt_BgWriY,192
+ gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
+ gym_examples/envs/wsn_env.py,sha256=cpqNN2fOEBE-_w-E4stAPH1VLsxPlIGtoZVV3dHmvhU,15236
+ gym_examples-2.0.8.dist-info/METADATA,sha256=XlQxAfb-C0awmiUAIbEmsli6nSJPMWGZiqJYl1y_UUk,410
+ gym_examples-2.0.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ gym_examples-2.0.8.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
+ gym_examples-2.0.8.dist-info/RECORD,,
gym_examples-2.0.7.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
- gym_examples/__init__.py,sha256=S24PMK01kJnGXPbYfpKMTHxTUI-SziAjOgRu5W-S7YY,192
- gym_examples/envs/__init__.py,sha256=lgMe4pyOuUTgTBUddM0iwMlETsYTwFShny6ifm8PGM8,53
- gym_examples/envs/wsn_env.py,sha256=_XVGI1ZWnUr0vhgBqV1HdyJzMoilFra_vMM27CnQceE,18341
- gym_examples-2.0.7.dist-info/METADATA,sha256=IcY8_pntZCRrWmjJN8A8WMRGut-0zVXfpD4V01IPpfQ,410
- gym_examples-2.0.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- gym_examples-2.0.7.dist-info/top_level.txt,sha256=rJRksoAF32M6lTLBEwYzRdo4PgtejceaNnnZ3HeY_Rk,13
- gym_examples-2.0.7.dist-info/RECORD,,