Mesa-2.2.3-py3-none-any.whl → Mesa-2.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -0,0 +1,273 @@
+ import enum
+ import math
+
+ from mesa import Agent, Model
+ from mesa.experimental.devs.simulator import ABMSimulator
+ from mesa.space import SingleGrid
+
+
+ class EpsteinAgent(Agent):
+     def __init__(self, unique_id, model, vision, movement):
+         super().__init__(unique_id, model)
+         self.vision = vision
+         self.movement = movement
+
+
+ class AgentState(enum.IntEnum):
+     QUIESCENT = enum.auto()
+     ARRESTED = enum.auto()
+     ACTIVE = enum.auto()
+
+
+ class Citizen(EpsteinAgent):
+     """
+     A member of the general population, may or may not be in active rebellion.
+     Summary of rule: If grievance - risk > threshold, rebel.
+
+     Attributes:
+         unique_id: unique int
+         model: model instance
+         hardship: Agent's 'perceived hardship (i.e., physical or economic
+             privation).' Exogenous, drawn from U(0,1).
+         regime_legitimacy: Agent's perception of regime legitimacy, equal
+             across agents. Exogenous.
+         risk_aversion: Exogenous, drawn from U(0,1).
+         threshold: if (grievance - (risk_aversion * arrest_probability)) >
+             threshold, go/remain Active
+         vision: number of cells in each direction (N, S, E and W) that agent
+             can inspect
+         condition: AgentState.QUIESCENT, ACTIVE or ARRESTED; deterministic
+             function of grievance, perceived risk, and threshold
+         grievance: deterministic function of hardship and regime_legitimacy;
+             how aggrieved is agent at the regime?
+         arrest_probability: agent's assessment of arrest probability, given
+             rebellion
+     """
+
+     def __init__(
+         self,
+         unique_id,
+         model,
+         vision,
+         movement,
+         hardship,
+         regime_legitimacy,
+         risk_aversion,
+         threshold,
+         arrest_prob_constant,
+     ):
+         """
+         Create a new Citizen.
+         Args:
+             unique_id: unique int
+             model : model instance
+             hardship: Agent's 'perceived hardship (i.e., physical or economic
+                 privation).' Exogenous, drawn from U(0,1).
+             regime_legitimacy: Agent's perception of regime legitimacy, equal
+                 across agents. Exogenous.
+             risk_aversion: Exogenous, drawn from U(0,1).
+             threshold: if (grievance - (risk_aversion * arrest_probability)) >
+                 threshold, go/remain Active
+             vision: number of cells in each direction (N, S, E and W) that
+                 agent can inspect. Exogenous.
+         """
+         super().__init__(unique_id, model, vision, movement)
+         self.hardship = hardship
+         self.regime_legitimacy = regime_legitimacy
+         self.risk_aversion = risk_aversion
+         self.threshold = threshold
+         self.condition = AgentState.QUIESCENT
+         self.grievance = self.hardship * (1 - self.regime_legitimacy)
+         self.arrest_probability = None
+         self.arrest_prob_constant = arrest_prob_constant
+
+     def step(self):
+         """
+         Decide whether to activate, then move if applicable.
+         """
+         self.update_neighbors()
+         self.update_estimated_arrest_probability()
+         net_risk = self.risk_aversion * self.arrest_probability
+         if self.grievance - net_risk > self.threshold:
+             self.condition = AgentState.ACTIVE
+         else:
+             self.condition = AgentState.QUIESCENT
+         if self.movement and self.empty_neighbors:
+             new_pos = self.random.choice(self.empty_neighbors)
+             self.model.grid.move_agent(self, new_pos)
+
+     def update_neighbors(self):
+         """
+         Look around and see who my neighbors are
+         """
+         self.neighborhood = self.model.grid.get_neighborhood(
+             self.pos, moore=True, radius=self.vision
+         )
+         self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
+         self.empty_neighbors = [
+             c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
+         ]
+
+     def update_estimated_arrest_probability(self):
+         """
+         Based on the ratio of cops to actives in my neighborhood, estimate the
+         p(Arrest | I go active).
+         """
+         cops_in_vision = len([c for c in self.neighbors if isinstance(c, Cop)])
+         actives_in_vision = 1.0  # citizen counts herself
+         for c in self.neighbors:
+             if isinstance(c, Citizen) and c.condition == AgentState.ACTIVE:
+                 actives_in_vision += 1
+         self.arrest_probability = 1 - math.exp(
+             -1 * self.arrest_prob_constant * (cops_in_vision / actives_in_vision)
+         )
+
+     def sent_to_jail(self, value):
+         self.model.active_agents.remove(self)
+         self.condition = AgentState.ARRESTED
+         self.model.simulator.schedule_event_relative(self.release_from_jail, value)
+
+     def release_from_jail(self):
+         self.model.active_agents.add(self)
+         self.condition = AgentState.QUIESCENT
+
+
+ class Cop(EpsteinAgent):
+     """
+     A cop for life. No defection.
+     Summary of rule: Inspect local vision and arrest a random active agent.
+
+     Attributes:
+         unique_id: unique int
+         x, y: Grid coordinates
+         vision: number of cells in each direction (N, S, E and W) that cop is
+             able to inspect
+     """
+
+     def __init__(self, unique_id, model, vision, movement, max_jail_term):
+         super().__init__(unique_id, model, vision, movement)
+         self.max_jail_term = max_jail_term
+
+     def step(self):
+         """
+         Inspect local vision and arrest a random active agent. Move if
+         applicable.
+         """
+         self.update_neighbors()
+         active_neighbors = []
+         for agent in self.neighbors:
+             if isinstance(agent, Citizen) and agent.condition == AgentState.ACTIVE:
+                 active_neighbors.append(agent)
+         if active_neighbors:
+             arrestee = self.random.choice(active_neighbors)
+             arrestee.sent_to_jail(self.random.randint(0, self.max_jail_term))
+         if self.movement and self.empty_neighbors:
+             new_pos = self.random.choice(self.empty_neighbors)
+             self.model.grid.move_agent(self, new_pos)
+
+     def update_neighbors(self):
+         """
+         Look around and see who my neighbors are.
+         """
+         self.neighborhood = self.model.grid.get_neighborhood(
+             self.pos, moore=True, radius=self.vision
+         )
+         self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
+         self.empty_neighbors = [
+             c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
+         ]
+
+
+ class EpsteinCivilViolence(Model):
+     """
+     Model 1 from "Modeling civil violence: An agent-based computational
+     approach," by Joshua Epstein.
+     http://www.pnas.org/content/99/suppl_3/7243.full
+     Attributes:
+         height: grid height
+         width: grid width
+         citizen_density: approximate % of cells occupied by citizens.
+         cop_density: approximate % of cells occupied by cops.
+         citizen_vision: number of cells in each direction (N, S, E and W) that
+             citizen can inspect
+         cop_vision: number of cells in each direction (N, S, E and W) that cop
+             can inspect
+         legitimacy: (L) citizens' perception of regime legitimacy, equal
+             across all citizens
+         max_jail_term: (J_max)
+         active_threshold: if (grievance - (risk_aversion * arrest_probability))
+             > threshold, citizen rebels
+         arrest_prob_constant: set to ensure agents make plausible arrest
+             probability estimates
+         movement: binary, whether agents try to move at step end
+         max_iters: model may not have a natural stopping point, so we set a
+             max.
+     """
+
+     def __init__(
+         self,
+         width=40,
+         height=40,
+         citizen_density=0.7,
+         cop_density=0.074,
+         citizen_vision=7,
+         cop_vision=7,
+         legitimacy=0.8,
+         max_jail_term=1000,
+         active_threshold=0.1,
+         arrest_prob_constant=2.3,
+         movement=True,
+         max_iters=1000,
+         seed=None,
+     ):
+         super().__init__(seed=seed)
+         if cop_density + citizen_density > 1:
+             raise ValueError("Cop density + citizen density must be less than 1")
+
+         self.width = width
+         self.height = height
+         self.citizen_density = citizen_density
+         self.cop_density = cop_density
+
+         self.max_iters = max_iters
+
+         self.grid = SingleGrid(self.width, self.height, torus=True)
+
+         for _, pos in self.grid.coord_iter():
+             if self.random.random() < self.cop_density:
+                 agent = Cop(
+                     self.next_id(),
+                     self,
+                     cop_vision,
+                     movement,
+                     max_jail_term,
+                 )
+             elif self.random.random() < (self.cop_density + self.citizen_density):
+                 agent = Citizen(
+                     self.next_id(),
+                     self,
+                     citizen_vision,
+                     movement,
+                     hardship=self.random.random(),
+                     regime_legitimacy=legitimacy,
+                     risk_aversion=self.random.random(),
+                     threshold=active_threshold,
+                     arrest_prob_constant=arrest_prob_constant,
+                 )
+             else:
+                 continue
+             self.grid.place_agent(agent, pos)
+
+         self.active_agents = self.agents
+
+     def step(self):
+         self.active_agents.shuffle(inplace=True).do("step")
+
+
+ if __name__ == "__main__":
+     model = EpsteinCivilViolence(seed=15)
+     simulator = ABMSimulator()
+
+     simulator.setup(model)
+
+     simulator.run(time_delta=100)
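The activation rule in the Citizen class above combines three quantities: grievance = hardship * (1 - regime_legitimacy), an estimated arrest probability of 1 - exp(-arrest_prob_constant * cops/actives), and a net risk of risk_aversion * arrest_probability. A minimal standalone sketch of that arithmetic, using the model defaults (legitimacy 0.8, active_threshold 0.1, arrest_prob_constant 2.3) and illustrative values for the per-agent draws:

import math

# Illustrative numbers only; in the model, hardship and risk_aversion are drawn from U(0,1).
hardship = 0.9
risk_aversion = 0.5
regime_legitimacy = 0.8      # model default `legitimacy`
threshold = 0.1              # model default `active_threshold`
arrest_prob_constant = 2.3   # model default

cops_in_vision = 1
actives_in_vision = 1.0      # the citizen counts herself

grievance = hardship * (1 - regime_legitimacy)                 # 0.18
arrest_probability = 1 - math.exp(
    -arrest_prob_constant * cops_in_vision / actives_in_vision
)                                                              # ~0.90
net_risk = risk_aversion * arrest_probability                  # ~0.45

print(grievance - net_risk > threshold)  # False: the citizen stays QUIESCENT

With no cop in vision, the estimated arrest probability falls to 0 and the same citizen would activate, since its grievance of 0.18 exceeds the 0.1 threshold.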
@@ -0,0 +1,250 @@
+ """
+ Wolf-Sheep Predation Model
+ ================================
+
+ Replication of the model found in NetLogo:
+ Wilensky, U. (1997). NetLogo Wolf Sheep Predation model.
+ http://ccl.northwestern.edu/netlogo/models/WolfSheepPredation.
+ Center for Connected Learning and Computer-Based Modeling,
+ Northwestern University, Evanston, IL.
+ """
+
+ import mesa
+ from mesa.experimental.devs.simulator import ABMSimulator
+
+
+ class Animal(mesa.Agent):
+     def __init__(self, unique_id, model, moore, energy, p_reproduce, energy_from_food):
+         super().__init__(unique_id, model)
+         self.energy = energy
+         self.p_reproduce = p_reproduce
+         self.energy_from_food = energy_from_food
+         self.moore = moore
+
+     def random_move(self):
+         next_moves = self.model.grid.get_neighborhood(self.pos, self.moore, True)
+         next_move = self.random.choice(next_moves)
+         # Now move:
+         self.model.grid.move_agent(self, next_move)
+
+     def spawn_offspring(self):
+         self.energy /= 2
+         offspring = self.__class__(
+             self.model.next_id(),
+             self.model,
+             self.moore,
+             self.energy,
+             self.p_reproduce,
+             self.energy_from_food,
+         )
+         self.model.grid.place_agent(offspring, self.pos)
+
+     def feed(self): ...
+
+     def die(self):
+         self.model.grid.remove_agent(self)
+         self.remove()
+
+     def step(self):
+         self.random_move()
+         self.energy -= 1
+
+         self.feed()
+
+         if self.energy < 0:
+             self.die()
+         elif self.random.random() < self.p_reproduce:
+             self.spawn_offspring()
+
+
+ class Sheep(Animal):
+     """
+     A sheep that walks around, reproduces (asexually) and gets eaten.
+
+     The init is the same as Animal.
+     """
+
+     def feed(self):
+         # If there is grass available, eat it
+         agents = self.model.grid.get_cell_list_contents(self.pos)
+         grass_patch = next(obj for obj in agents if isinstance(obj, GrassPatch))
+         if grass_patch.fully_grown:
+             self.energy += self.energy_from_food
+             grass_patch.fully_grown = False
+
+
+ class Wolf(Animal):
+     """
+     A wolf that walks around, reproduces (asexually) and eats sheep.
+     """
+
+     def feed(self):
+         agents = self.model.grid.get_cell_list_contents(self.pos)
+         sheep = [obj for obj in agents if isinstance(obj, Sheep)]
+         if len(sheep) > 0:
+             sheep_to_eat = self.random.choice(sheep)
+             self.energy += self.energy_from_food
+
+             # Kill the sheep
+             sheep_to_eat.die()
+
+
+ class GrassPatch(mesa.Agent):
+     """
+     A patch of grass that grows at a fixed rate and is eaten by sheep.
+     """
+
+     @property
+     def fully_grown(self) -> bool:
+         return self._fully_grown
+
+     @fully_grown.setter
+     def fully_grown(self, value: bool):
+         self._fully_grown = value
+
+         if not value:
+             self.model.simulator.schedule_event_relative(
+                 setattr,
+                 self.grass_regrowth_time,
+                 function_args=[self, "fully_grown", True],
+             )
+
+     def __init__(self, unique_id, model, fully_grown, countdown, grass_regrowth_time):
+         """
+         Creates a new patch of grass
+
+         Args:
+             fully_grown: (boolean) Whether the patch of grass is fully grown or not
+             countdown: Time for the patch of grass to be fully grown again
+         """
+         super().__init__(unique_id, model)
+         self._fully_grown = fully_grown
+         self.grass_regrowth_time = grass_regrowth_time
+
+         if not self.fully_grown:
+             self.model.simulator.schedule_event_relative(
+                 setattr, countdown, function_args=[self, "fully_grown", True]
+             )
+
+     def set_fully_grown(self):
+         self.fully_grown = True
+
+
+ class WolfSheep(mesa.Model):
+     """
+     Wolf-Sheep Predation Model
+
+     A model for simulating wolf and sheep (predator-prey) ecosystem modelling.
+     """
+
+     def __init__(
+         self,
+         height,
+         width,
+         initial_sheep,
+         initial_wolves,
+         sheep_reproduce,
+         wolf_reproduce,
+         grass_regrowth_time,
+         wolf_gain_from_food=13,
+         sheep_gain_from_food=5,
+         moore=False,
+         simulator=None,
+         seed=None,
+     ):
+         """
+         Create a new Wolf-Sheep model with the given parameters.
+
+         Args:
+             initial_sheep: Number of sheep to start with
+             initial_wolves: Number of wolves to start with
+             sheep_reproduce: Probability of each sheep reproducing each step
+             wolf_reproduce: Probability of each wolf reproducing each step
+             wolf_gain_from_food: Energy a wolf gains from eating a sheep
+             grass_regrowth_time: How long it takes for a grass patch to regrow
+                 once it is eaten
+             sheep_gain_from_food: Energy sheep gain from eating grass
+             moore: If True, animals inspect all eight neighboring cells;
+                 otherwise only the four von Neumann neighbors (N, S, E and W)
+         """
+         super().__init__(seed=seed)
+         # Set parameters
+         self.height = height
+         self.width = width
+         self.initial_sheep = initial_sheep
+         self.initial_wolves = initial_wolves
+         self.simulator = simulator
+
+         # self.sheep_reproduce = sheep_reproduce
+         # self.wolf_reproduce = wolf_reproduce
+         # self.grass_regrowth_time = grass_regrowth_time
+         # self.wolf_gain_from_food = wolf_gain_from_food
+         # self.sheep_gain_from_food = sheep_gain_from_food
+         # self.moore = moore
+
+         self.grid = mesa.space.MultiGrid(self.width, self.height, torus=False)
+
+         for _ in range(self.initial_sheep):
+             pos = (
+                 self.random.randrange(self.width),
+                 self.random.randrange(self.height),
+             )
+             energy = self.random.randrange(2 * sheep_gain_from_food)
+             sheep = Sheep(
+                 self.next_id(),
+                 self,
+                 moore,
+                 energy,
+                 sheep_reproduce,
+                 sheep_gain_from_food,
+             )
+             self.grid.place_agent(sheep, pos)
+
+         # Create wolves
+         for _ in range(self.initial_wolves):
+             pos = (
+                 self.random.randrange(self.width),
+                 self.random.randrange(self.height),
+             )
+             energy = self.random.randrange(2 * wolf_gain_from_food)
+             wolf = Wolf(
+                 self.next_id(),
+                 self,
+                 moore,
+                 energy,
+                 wolf_reproduce,
+                 wolf_gain_from_food,
+             )
+             self.grid.place_agent(wolf, pos)
+
+         # Create grass patches
+         possibly_fully_grown = [True, False]
+         for _agent, pos in self.grid.coord_iter():
+             fully_grown = self.random.choice(possibly_fully_grown)
+             if fully_grown:
+                 countdown = grass_regrowth_time
+             else:
+                 countdown = self.random.randrange(grass_regrowth_time)
+             patch = GrassPatch(
+                 self.next_id(), self, fully_grown, countdown, grass_regrowth_time
+             )
+             self.grid.place_agent(patch, pos)
+
+     def step(self):
+         self.get_agents_of_type(Sheep).shuffle(inplace=True).do("step")
+         self.get_agents_of_type(Wolf).shuffle(inplace=True).do("step")
+
+
+ if __name__ == "__main__":
+     import time
+
+     simulator = ABMSimulator()
+
+     model = WolfSheep(25, 25, 60, 40, 0.2, 0.1, 20, simulator=simulator, seed=15)
+
+     simulator.setup(model)
+
+     start_time = time.perf_counter()
+     simulator.run(100)
+     print(simulator.time)
+     print("Time:", time.perf_counter() - start_time)