Mesa 3.0.0b0__py3-none-any.whl → 3.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of Mesa might be problematic. Click here for more details.
- mesa/__init__.py +2 -1
- mesa/agent.py +37 -27
- mesa/examples/README.md +37 -0
- mesa/examples/__init__.py +21 -0
- mesa/examples/advanced/__init__.py +0 -0
- mesa/examples/advanced/epstein_civil_violence/Epstein Civil Violence.ipynb +116 -0
- mesa/examples/advanced/epstein_civil_violence/Readme.md +34 -0
- mesa/examples/advanced/epstein_civil_violence/__init__.py +0 -0
- mesa/examples/advanced/epstein_civil_violence/agents.py +158 -0
- mesa/examples/advanced/epstein_civil_violence/app.py +72 -0
- mesa/examples/advanced/epstein_civil_violence/model.py +146 -0
- mesa/examples/advanced/pd_grid/Readme.md +43 -0
- mesa/examples/advanced/pd_grid/__init__.py +0 -0
- mesa/examples/advanced/pd_grid/agents.py +50 -0
- mesa/examples/advanced/pd_grid/analysis.ipynb +228 -0
- mesa/examples/advanced/pd_grid/app.py +50 -0
- mesa/examples/advanced/pd_grid/model.py +71 -0
- mesa/examples/advanced/sugarscape_g1mt/Readme.md +64 -0
- mesa/examples/advanced/sugarscape_g1mt/__init__.py +0 -0
- mesa/examples/advanced/sugarscape_g1mt/agents.py +344 -0
- mesa/examples/advanced/sugarscape_g1mt/app.py +70 -0
- mesa/examples/advanced/sugarscape_g1mt/model.py +180 -0
- mesa/examples/advanced/sugarscape_g1mt/sugar-map.txt +50 -0
- mesa/examples/advanced/sugarscape_g1mt/tests.py +69 -0
- mesa/examples/advanced/wolf_sheep/Readme.md +57 -0
- mesa/examples/advanced/wolf_sheep/__init__.py +0 -0
- mesa/examples/advanced/wolf_sheep/agents.py +102 -0
- mesa/examples/advanced/wolf_sheep/app.py +77 -0
- mesa/examples/advanced/wolf_sheep/model.py +137 -0
- mesa/examples/basic/__init__.py +0 -0
- mesa/examples/basic/boid_flockers/Readme.md +22 -0
- mesa/examples/basic/boid_flockers/__init__.py +0 -0
- mesa/examples/basic/boid_flockers/agents.py +71 -0
- mesa/examples/basic/boid_flockers/app.py +58 -0
- mesa/examples/basic/boid_flockers/model.py +69 -0
- mesa/examples/basic/boltzmann_wealth_model/Readme.md +56 -0
- mesa/examples/basic/boltzmann_wealth_model/__init__.py +0 -0
- mesa/examples/basic/boltzmann_wealth_model/agents.py +31 -0
- mesa/examples/basic/boltzmann_wealth_model/app.py +65 -0
- mesa/examples/basic/boltzmann_wealth_model/model.py +43 -0
- mesa/examples/basic/boltzmann_wealth_model/st_app.py +115 -0
- mesa/examples/basic/conways_game_of_life/Readme.md +39 -0
- mesa/examples/basic/conways_game_of_life/__init__.py +0 -0
- mesa/examples/basic/conways_game_of_life/agents.py +47 -0
- mesa/examples/basic/conways_game_of_life/app.py +39 -0
- mesa/examples/basic/conways_game_of_life/model.py +31 -0
- mesa/examples/basic/conways_game_of_life/st_app.py +72 -0
- mesa/examples/basic/schelling/Readme.md +40 -0
- mesa/examples/basic/schelling/__init__.py +0 -0
- mesa/examples/basic/schelling/agents.py +26 -0
- mesa/examples/basic/schelling/analysis.ipynb +205 -0
- mesa/examples/basic/schelling/app.py +42 -0
- mesa/examples/basic/schelling/model.py +59 -0
- mesa/examples/basic/virus_on_network/Readme.md +61 -0
- mesa/examples/basic/virus_on_network/__init__.py +0 -0
- mesa/examples/basic/virus_on_network/agents.py +69 -0
- mesa/examples/basic/virus_on_network/app.py +136 -0
- mesa/examples/basic/virus_on_network/model.py +96 -0
- mesa/experimental/__init__.py +8 -2
- mesa/experimental/cell_space/cell.py +9 -0
- mesa/experimental/cell_space/discrete_space.py +13 -1
- mesa/experimental/cell_space/grid.py +13 -0
- mesa/experimental/cell_space/network.py +3 -0
- mesa/experimental/devs/eventlist.py +6 -0
- mesa/model.py +76 -12
- mesa/space.py +70 -5
- mesa/time.py +5 -3
- mesa/visualization/components/altair.py +87 -19
- mesa/visualization/components/matplotlib.py +65 -16
- mesa/visualization/solara_viz.py +13 -58
- {mesa-3.0.0b0.dist-info → mesa-3.0.0b2.dist-info}/METADATA +1 -3
- mesa-3.0.0b2.dist-info/RECORD +93 -0
- mesa/cookiecutter-mesa/cookiecutter.json +0 -8
- mesa/cookiecutter-mesa/hooks/post_gen_project.py +0 -13
- mesa/cookiecutter-mesa/{{cookiecutter.snake}}/README.md +0 -4
- mesa/cookiecutter-mesa/{{cookiecutter.snake}}/app.pytemplate +0 -27
- mesa/cookiecutter-mesa/{{cookiecutter.snake}}/setup.pytemplate +0 -11
- mesa/cookiecutter-mesa/{{cookiecutter.snake}}/{{cookiecutter.snake}}/__init__.py +0 -1
- mesa/cookiecutter-mesa/{{cookiecutter.snake}}/{{cookiecutter.snake}}/model.pytemplate +0 -60
- mesa/main.py +0 -65
- mesa-3.0.0b0.dist-info/RECORD +0 -45
- {mesa-3.0.0b0.dist-info → mesa-3.0.0b2.dist-info}/WHEEL +0 -0
- {mesa-3.0.0b0.dist-info → mesa-3.0.0b2.dist-info}/entry_points.txt +0 -0
- {mesa-3.0.0b0.dist-info → mesa-3.0.0b2.dist-info}/licenses/LICENSE +0 -0
- {mesa-3.0.0b0.dist-info → mesa-3.0.0b2.dist-info}/licenses/NOTICE +0 -0
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from mesa.examples.advanced.epstein_civil_violence.agents import Citizen, Cop
|
|
2
|
+
from mesa.examples.advanced.epstein_civil_violence.model import EpsteinCivilViolence
|
|
3
|
+
from mesa.visualization import (
|
|
4
|
+
Slider,
|
|
5
|
+
SolaraViz,
|
|
6
|
+
make_plot_measure,
|
|
7
|
+
make_space_matplotlib,
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
# Visual encoding used by citizen_cop_portrayal below.
COP_COLOR = "#000000"  # black: cops
AGENT_QUIET_COLOR = "#648FFF"  # blue: quiescent citizens
AGENT_REBEL_COLOR = "#FE6100"  # orange: actively rebelling citizens
JAIL_COLOR = "#808080"  # grey: citizens serving a jail sentence
JAIL_SHAPE = "rect"  # jailed citizens are drawn as rectangles
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def citizen_cop_portrayal(agent):
    """Return a portrayal dict for a Citizen or Cop agent.

    Args:
        agent: the agent to draw, or None for an empty cell.

    Returns:
        A dict of drawing attributes (color, shape, size, layer, ...),
        or None when the cell is empty.
    """
    if agent is None:
        return

    portrayal = {
        "size": 25,
        "shape": "s",  # square marker
    }

    if isinstance(agent, Citizen):
        color = (
            AGENT_QUIET_COLOR if agent.condition == "Quiescent" else AGENT_REBEL_COLOR
        )
        # A jail sentence overrides the quiescent/active coloring.
        color = JAIL_COLOR if agent.jail_sentence else color
        shape = JAIL_SHAPE if agent.jail_sentence else "circle"
        portrayal["color"] = color
        portrayal["shape"] = shape
        # BUG FIX: the original compared `shape == "s"`, but shape can only be
        # JAIL_SHAPE ("rect") or "circle", so that branch was dead and jailed
        # agents never received width/height. Compare against JAIL_SHAPE.
        if shape == JAIL_SHAPE:
            portrayal["w"] = 0.9
            portrayal["h"] = 0.9
        else:
            portrayal["r"] = 0.5
            portrayal["filled"] = False
        portrayal["layer"] = 0

    elif isinstance(agent, Cop):
        portrayal["color"] = COP_COLOR
        portrayal["r"] = 0.9
        portrayal["layer"] = 1  # draw cops above citizens

    return portrayal
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# User-adjustable model parameters exposed in the Solara UI. Fixed values
# (height/width) are passed through unchanged; Slider signature is
# (label, default, min, max, step).
model_params = {
    "height": 40,
    "width": 40,
    "citizen_density": Slider("Initial Agent Density", 0.7, 0.0, 0.9, 0.1),
    "cop_density": Slider("Initial Cop Density", 0.04, 0.0, 0.1, 0.01),
    "citizen_vision": Slider("Citizen Vision", 7, 1, 10, 1),
    "cop_vision": Slider("Cop Vision", 7, 1, 10, 1),
    "legitimacy": Slider("Government Legitimacy", 0.82, 0.0, 1, 0.01),
    "max_jail_term": Slider("Max Jail Term", 30, 0, 50, 1),
}

# Grid view (matplotlib) driven by the portrayal function above, plus a
# line chart of the model-level reporters declared in EpsteinCivilViolence.
space_component = make_space_matplotlib(citizen_cop_portrayal)
chart_component = make_plot_measure(["Quiescent", "Active", "Jailed"])

# Initial model instance shown when the page loads (default parameters).
epstein_model = EpsteinCivilViolence()

page = SolaraViz(
    epstein_model,
    components=[space_component, chart_component],
    model_params=model_params,
    name="Epstein Civil Violence",
)
page  # noqa
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
import mesa
|
|
2
|
+
from mesa.examples.advanced.epstein_civil_violence.agents import Citizen, Cop
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class EpsteinCivilViolence(mesa.Model):
    """
    Model 1 from "Modeling civil violence: An agent-based computational
    approach," by Joshua Epstein.
    http://www.pnas.org/content/99/suppl_3/7243.full

    Attributes:
        height: grid height
        width: grid width
        citizen_density: approximate % of cells occupied by citizens.
        cop_density: approximate % of cells occupied by cops.
        citizen_vision: number of cells in each direction (N, S, E and W) that
            citizen can inspect
        cop_vision: number of cells in each direction (N, S, E and W) that cop
            can inspect
        legitimacy: (L) citizens' perception of regime legitimacy, equal
            across all citizens
        max_jail_term: (J_max)
        active_threshold: if (grievance - (risk_aversion * arrest_probability))
            > threshold, citizen rebels
        arrest_prob_constant: set to ensure agents make plausible arrest
            probability estimates
        movement: binary, whether agents try to move at step end
        max_iters: model may not have a natural stopping point, so we set a
            max.
    """

    def __init__(
        self,
        width=40,
        height=40,
        citizen_density=0.7,
        cop_density=0.074,
        citizen_vision=7,
        cop_vision=7,
        legitimacy=0.8,
        max_jail_term=1000,
        active_threshold=0.1,
        arrest_prob_constant=2.3,
        movement=True,
        max_iters=1000,
        seed=None,
    ):
        """Create a new civil-violence model.

        Raises:
            ValueError: if cop_density + citizen_density exceeds 1, since the
                grid has capacity 1 per cell.
        """
        # Fail fast on impossible densities, before any setup work is done.
        if cop_density + citizen_density > 1:
            raise ValueError("Cop density + citizen density must be less than 1")

        super().__init__(seed=seed)
        self.width = width
        self.height = height
        self.citizen_density = citizen_density
        self.cop_density = cop_density
        self.citizen_vision = citizen_vision
        self.cop_vision = cop_vision
        self.legitimacy = legitimacy
        self.max_jail_term = max_jail_term
        self.active_threshold = active_threshold
        self.arrest_prob_constant = arrest_prob_constant
        self.movement = movement
        self.max_iters = max_iters
        self.iteration = 0  # step counter, drives the max_iters stop condition

        # Torus Moore grid, at most one agent per cell.
        self.grid = mesa.experimental.cell_space.OrthogonalMooreGrid(
            (width, height), capacity=1, torus=True, random=self.random
        )

        model_reporters = {
            "Quiescent": lambda m: self.count_type_citizens(m, "Quiescent"),
            "Active": lambda m: self.count_type_citizens(m, "Active"),
            "Jailed": self.count_jailed,
            "Cops": self.count_cops,
        }
        # getattr with None default: Cop agents lack the citizen-only fields.
        agent_reporters = {
            "x": lambda a: a.cell.coordinate[0],
            "y": lambda a: a.cell.coordinate[1],
            "breed": lambda a: type(a).__name__,
            "jail_sentence": lambda a: getattr(a, "jail_sentence", None),
            "condition": lambda a: getattr(a, "condition", None),
            "arrest_probability": lambda a: getattr(a, "arrest_probability", None),
        }
        self.datacollector = mesa.DataCollector(
            model_reporters=model_reporters, agent_reporters=agent_reporters
        )

        # Populate the grid: each cell independently gets a cop, else maybe a
        # citizen, else stays empty.
        # NOTE(review): the elif draws a *second* random number, so the
        # effective citizen share is only approximately citizen_density;
        # preserved as-is since it matches the published example's behavior.
        for cell in self.grid.all_cells:
            if self.random.random() < self.cop_density:
                cop = Cop(self, vision=self.cop_vision)
                cop.move_to(cell)

            elif self.random.random() < (self.cop_density + self.citizen_density):
                citizen = Citizen(
                    self,
                    hardship=self.random.random(),
                    regime_legitimacy=self.legitimacy,
                    risk_aversion=self.random.random(),
                    threshold=self.active_threshold,
                    vision=self.citizen_vision,
                )
                citizen.move_to(cell)

        self.running = True
        # Collect the initial (step 0) state.
        self.datacollector.collect(self)

    def step(self):
        """
        Advance the model by one step and collect data.
        """
        self.agents.shuffle_do("step")
        # collect data
        self.datacollector.collect(self)
        self.iteration += 1
        if self.iteration > self.max_iters:
            self.running = False

    @staticmethod
    def count_type_citizens(model, condition, exclude_jailed=True):
        """
        Helper method to count citizens by condition (Quiescent/Active),
        optionally excluding those currently serving a jail sentence.
        """
        citizens = model.agents_by_type[Citizen]

        if exclude_jailed:
            return len(
                [
                    c
                    for c in citizens
                    if (c.condition == condition) and (c.jail_sentence == 0)
                ]
            )
        else:
            return len([c for c in citizens if c.condition == condition])

    @staticmethod
    def count_jailed(model):
        """
        Helper method to count jailed citizens.
        """
        return len([a for a in model.agents_by_type[Citizen] if a.jail_sentence > 0])

    @staticmethod
    def count_cops(model):
        """
        Helper method to count cops.
        """
        # BUG FIX (docs): the original docstring wrongly said "jailed agents".
        return len(model.agents_by_type[Cop])
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# Demographic Prisoner's Dilemma on a Grid
|
|
2
|
+
|
|
3
|
+
## Summary
|
|
4
|
+
|
|
5
|
+
The Demographic Prisoner's Dilemma is a family of variants on the classic two-player [Prisoner's Dilemma]. The model consists of agents, each with a strategy of either Cooperate or Defect. Each agent's payoff is based on its strategy and the strategies of its spatial neighbors. After each step of the model, the agents adopt the strategy of their neighbor with the highest total score.
|
|
6
|
+
|
|
7
|
+
The model payoff table is:
|
|
8
|
+
|
|
9
|
+
| | Cooperate | Defect|
|
|
10
|
+
|:-------------:|:---------:|:-----:|
|
|
11
|
+
| **Cooperate** | 1, 1 | 0, D |
|
|
12
|
+
| **Defect** | D, 0 | 0, 0 |
|
|
13
|
+
|
|
14
|
+
Where *D* is the defection bonus, generally set higher than 1. In these runs, the defection bonus is set to $D=1.6$.
|
|
15
|
+
|
|
16
|
+
The Demographic Prisoner's Dilemma demonstrates how simple rules can lead to the emergence of widespread cooperation, despite the Defection strategy dominating each individual interaction game. However, it is also interesting for another reason: it is known to be sensitive to the activation regime employed in it.
|
|
17
|
+
|
|
18
|
+
## How to Run
|
|
19
|
+
|
|
20
|
+
##### Web based model simulation
|
|
21
|
+
|
|
22
|
+
To run the model interactively, run ``solara run app.py`` in this directory.
|
|
23
|
+
|
|
24
|
+
##### Jupyter Notebook
|
|
25
|
+
|
|
26
|
+
Launch the ``Demographic Prisoner's Dilemma Activation Schedule.ipynb`` notebook and run the code.
|
|
27
|
+
|
|
28
|
+
## Files
|
|
29
|
+
|
|
30
|
+
* ``agents.py``: contains the agent class.
|
|
31
|
+
* ``model.py``: contains the model class; the model takes an ``activation_order`` string as an argument, which determines in which order agents are activated: Sequential, Random or Simultaneous.
|
|
32
|
+
* ``app.py``: contains the interactive visualization server.
|
|
33
|
+
* ``Demographic Prisoner's Dilemma Activation Schedule.ipynb``: Jupyter Notebook for running the scheduling experiment. This runs the model three times, one for each activation type, and demonstrates how the activation regime drives the model to different outcomes.
|
|
34
|
+
|
|
35
|
+
## Further Reading
|
|
36
|
+
|
|
37
|
+
This model is adapted from:
|
|
38
|
+
|
|
39
|
+
Wilensky, U. (2002). NetLogo PD Basic Evolutionary model. http://ccl.northwestern.edu/netlogo/models/PDBasicEvolutionary. Center for Connected Learning and Computer-Based Modeling, Northwestern University, Evanston, IL.
|
|
40
|
+
|
|
41
|
+
The Demographic Prisoner's Dilemma originates from:
|
|
42
|
+
|
|
43
|
+
[Epstein, J. Zones of Cooperation in Demographic Prisoner's Dilemma. 1998.](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.8629&rep=rep1&type=pdf)
|
|
File without changes
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from mesa.experimental.cell_space import CellAgent
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class PDAgent(CellAgent):
    """Agent member of the iterated, spatial prisoner's dilemma model."""

    def __init__(self, model, starting_move=None):
        """
        Create a new Prisoner's Dilemma agent.

        Args:
            model: model instance
            starting_move: If provided, determines the agent's initial state:
                C(ooperating) or D(efecting). Otherwise, random.
        """
        super().__init__(model)
        self.score = 0  # cumulative payoff across rounds
        if starting_move:
            self.move = starting_move
        else:
            self.move = self.random.choice(["C", "D"])
        self.next_move = None  # move staged in step(), applied in advance()

    @property
    def is_cooperating(self):
        """True when the agent's current move is Cooperate."""
        return self.move == "C"

    # BUG FIX (naming): the property name was misspelled. Keep the original
    # spelling as a backward-compatible alias so existing callers still work.
    is_cooroperating = is_cooperating

    def step(self):
        """Get the best neighbor's move, and change own move accordingly
        if better than own score."""
        # Consider all neighbors plus self, and copy the top scorer's move.
        neighbors = [*list(self.cell.neighborhood.agents), self]
        best_neighbor = max(neighbors, key=lambda a: a.score)
        self.next_move = best_neighbor.move

        # Under Sequential/Random activation the staged move takes effect
        # immediately; Simultaneous activation defers it to a separate
        # advance() phase run by the model.
        if self.model.activation_order != "Simultaneous":
            self.advance()

    def advance(self):
        """Apply the staged move and accumulate this round's payoff."""
        self.move = self.next_move
        self.score += self.increment_score()

    def increment_score(self):
        """Return the payoff earned this round against all neighbors."""
        neighbors = self.cell.neighborhood.agents
        if self.model.activation_order == "Simultaneous":
            # Neighbors have staged their moves but not applied them yet.
            moves = [neighbor.next_move for neighbor in neighbors]
        else:
            moves = [neighbor.move for neighbor in neighbors]
        return sum(self.model.payoff[(self.move, move)] for move in moves)
|