ludics 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ludics-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Harry Foster, Vince Knight
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
ludics-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,70 @@
1
+ Metadata-Version: 2.3
2
+ Name: ludics
3
+ Version: 0.1.0
4
+ Summary: A Python library for the study of heterogeneous population dynamics
5
+ License: MIT License
6
+
7
+ Copyright (c) 2025 Harry Foster, Vince Knight
8
+
9
+ Permission is hereby granted, free of charge, to any person obtaining a copy
10
+ of this software and associated documentation files (the "Software"), to deal
11
+ in the Software without restriction, including without limitation the rights
12
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13
+ copies of the Software, and to permit persons to whom the Software is
14
+ furnished to do so, subject to the following conditions:
15
+
16
+ The above copyright notice and this permission notice shall be included in all
17
+ copies or substantial portions of the Software.
18
+
19
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25
+ SOFTWARE.
26
+ Requires-Dist: black>=25.9.0
27
+ Requires-Dist: numpy>=2.3.3
28
+ Requires-Dist: sympy>=1.14.0
29
+ Requires-Dist: scipy>=1.16.3
30
+ Requires-Dist: pandas>=3.0.0
31
+ Requires-Python: >=3.13
32
+ Description-Content-Type: text/markdown
33
+
34
+ # Ludics
35
+
36
+ A library for the study of heterogeneous population dynamics.
37
+
38
+ ## Usage
39
+
40
+ To run tests:
41
+
42
+ For source code:
43
+
44
+ ```
45
+ $ uv run pytest tests
46
+ ```
47
+
48
+ For docs:
49
+
50
+ ```
51
+ $ uv run pytest --doctest-glob="*.md" docs
52
+ ```
53
+
54
+ To run benchmarks:
55
+
56
+ ```
57
+ $ uv run pytest benchmarks
58
+ ```
59
+
60
+ To run ruff:
61
+
62
+ ```
63
+ $ uv run ruff format
64
+ ```
65
+
66
+ To check ruff:
67
+
68
+ ```
69
+ $ uv run ruff check
70
+ ```
ludics-0.1.0/README.md ADDED
@@ -0,0 +1,37 @@
1
+ # Ludics
2
+
3
+ A library for the study of heterogeneous population dynamics.
4
+
5
+ ## Usage
6
+
7
+ To run tests:
8
+
9
+ For source code:
10
+
11
+ ```
12
+ $ uv run pytest tests
13
+ ```
14
+
15
+ For docs:
16
+
17
+ ```
18
+ $ uv run pytest --doctest-glob="*.md" docs
19
+ ```
20
+
21
+ To run benchmarks:
22
+
23
+ ```
24
+ $ uv run pytest benchmarks
25
+ ```
26
+
27
+ To run ruff:
28
+
29
+ ```
30
+ $ uv run ruff format
31
+ ```
32
+
33
+ To check ruff:
34
+
35
+ ```
36
+ $ uv run ruff check
37
+ ```
@@ -0,0 +1,34 @@
1
+ [project]
2
+ name = "ludics"
3
+ version = "0.1.0"
4
+ description = "A Python library for the study of heterogeneous population dynamics"
5
+ readme = "README.md"
6
+ license = { file = "LICENSE" }
7
+ requires-python = ">=3.13"
8
+ dependencies = [
9
+ "black>=25.9.0",
10
+ "numpy>=2.3.3",
11
+ "sympy>=1.14.0",
12
+ "scipy>=1.16.3",
13
+ "pandas>=3.0.0",
14
+ ]
15
+
16
+ [dependency-groups]
17
+ dev = [
18
+ "pytest>=9.0.2",
19
+ "pytest-cov>=7.0.0",
20
+ "pytest-benchmark>=5.2.3",
21
+ "ruff>=0.9.0",
22
+ "ty>=0.0.1a1",
23
+ ]
24
+ docs = [
25
+ "mkdocs>=1.6.0",
26
+ "pymdown-extensions>=10.0",
27
+ ]
28
+
29
+ [build-system]
30
+ requires = ["uv_build>=0.10.7,<0.11.0"]
31
+ build-backend = "uv_build"
32
+
33
+ [tool.ruff]
34
+ target-version = "py314"
@@ -0,0 +1,47 @@
1
# Public API of the ludics package: re-export the population-dynamics
# helpers from ludics.main so users can access everything via
# `import ludics`.
from ludics.main import (
    get_state_space,
    compute_moran_transition_probability,
    fermi_imitation_function,
    compute_fermi_transition_probability,
    compute_imitation_introspection_transition_probability,
    compute_introspection_transition_probability,
    compute_aspiration_transition_probability,
    apply_mutation_probability,
    generate_transition_matrix,
    get_absorbing_state_index,
    get_absorbing_states,
    get_absorption_probabilities,
    extract_Q,
    extract_R_numerical,
    extract_R_symbolic,
    compute_absorption_matrix,
    calculate_absorption_matrix,
    compute_steady_state,
    calculate_steady_state,
    get_neighbourhood_states,
    simulate_markov_chain,
)

# Names exported by `from ludics import *`; kept in the same order as the
# import list above.
__all__ = [
    "get_state_space",
    "compute_moran_transition_probability",
    "fermi_imitation_function",
    "compute_fermi_transition_probability",
    "compute_imitation_introspection_transition_probability",
    "compute_introspection_transition_probability",
    "compute_aspiration_transition_probability",
    "apply_mutation_probability",
    "generate_transition_matrix",
    "get_absorbing_state_index",
    "get_absorbing_states",
    "get_absorption_probabilities",
    "extract_Q",
    "extract_R_numerical",
    "extract_R_symbolic",
    "compute_absorption_matrix",
    "calculate_absorption_matrix",
    "compute_steady_state",
    "calculate_steady_state",
    "get_neighbourhood_states",
    "simulate_markov_chain",
]
@@ -0,0 +1,105 @@
1
+ import numpy as np
2
+ import sympy as sym
3
+
4
+
5
def heterogeneous_contribution_pgg_fitness_function(
    state, r, contribution_vector, **kwargs
):
    """Public goods fitness function where each player contributes a
    (potentially different) amount.

    Each contributing player pays their own contribution into a common
    pot; the pot is multiplied by r, shared equally, and each player's
    payoff is their share minus what they paid in.

    Parameters
    -----------

    state: numpy.array, the ordered set of actions each player takes; 1 for
    contributing, 0 for free-riding.

    r: float, the parameter which the public goods is multiplied by

    contribution_vector: numpy.array, the value which each player contributes

    Returns
    -------

    numpy.array: an ordered vector of each player's fitness."""

    # Element-wise product: what each player actually pays (0 if free-riding).
    amounts_paid = np.asarray(state) * np.asarray(contribution_vector)

    # The multiplied pot, split evenly among all players.
    shared_return = r * amounts_paid.sum() / len(state)

    return shared_return - amounts_paid
43
+
44
+
45
def homogeneous_pgg_fitness_function(state, alpha, r, **kwargs):
    r"""
    Public goods fitness function where all players contribute the same amount.
    They therefore have a return of 1 + (selection_intensity * payoff), This
    is the selection intensity $\epsilon$, which determines the effect of
    payoff on a player's fitness.

    Parameters
    -----------
    state: numpy.array, the ordered set of actions each player takes

    alpha: float, each player's contribution

    r: float, the parameter which the public goods is multiplied by

    epsilon: float, the selection intensity determining the effect of payoff on
    a player's fitness. Must satisfy $0 < \epsilon < \frac{N}{(N-r)\alpha}$ if r<N

    Returns
    -------
    numpy.array: an ordered array of each player's fitness"""
    # np.full replaces the original list comprehension over enumerate(state);
    # the docstring is now raw so the LaTeX `\epsilon` is not treated as an
    # (invalid) escape sequence.
    homogeneous_contribution_vector = np.full(len(state), alpha)
    return heterogeneous_contribution_pgg_fitness_function(
        state=state,
        r=r,
        contribution_vector=homogeneous_contribution_vector,
        **kwargs,
    )
73
+
74
+
75
def general_four_state_fitness_function(state, **kwargs):
    """
    Returns a general fitness function for each player in a general 2 player population,
    according to the rule
    $f_i(x)$ is the fitness of player i in state x, indexed from 1.

    In this case, the states correspond to:
    $a=(0,0)$, $b=(0,1)$, $c=(1,0)$, $d=(1,1)$
    This is the same state space as we have in the Population Dynamics section
    of main.tex.

    Parameters
    -----------
    state: numpy.array, the ordered set of actions each player takes

    Returns
    -------
    numpy.array: an ordered array of each player's fitness

    Raises
    ------
    ValueError: if state is not one of the four recognised two-player
    states (the original code raised UnboundLocalError in that case)."""

    # Map each of the four recognised states to its symbol name.
    state_to_symbol_name = {
        (0, 0): "a",
        (0, 1): "b",
        (1, 0): "c",
        (1, 1): "d",
    }
    key = tuple(int(action) for action in state)
    if key not in state_to_symbol_name:
        raise ValueError(f"Unrecognised two-player state: {state}")
    state_symbol = sym.Symbol(state_to_symbol_name[key])

    return np.array(
        [sym.Function(f"f_{i + 1}")(state_symbol) for i in range(len(state))]
    )
@@ -0,0 +1,873 @@
1
+ """
2
+ Code for the general heterogeneous Moran process
3
+
4
+ This corresponds to the model described in `main.tex`
5
+
6
+ Assume we have N ordered individuals of k types: $A_1$, ... $A_K$
7
+
8
+ We have a state v \in S = [$v_1$, .... $v_n$] as the set of types of individuals in the population, so that $v_i$ is the type of individual i. |S| = $k^N$
9
+
10
+ There is also a fitness function f: S -> $R^N$, giving us an array of the fitness of individuals
11
+ """
12
+
13
+ import itertools
14
+ import numpy as np
15
+ import sympy as sym
16
+ import scipy
17
+ import collections
18
+
19
+
20
def get_state_space(N, k):
    """
    Return the state space for a given N and k.

    Parameters:
    -----------
    N: integer, number of individuals

    k: integer, number of possible types

    Returns:
    --------
    numpy.array of shape (k**N, N) containing every possible state, in
    lexicographic order (the order produced by itertools.product), which
    gives a consistent, reproducible ordering.
    """
    all_states = itertools.product(range(k), repeat=N)
    return np.array(list(all_states))
38
+
39
+
40
+ def compute_moran_transition_probability(
41
+ source, target, fitness_function, selection_intensity, **kwargs
42
+ ):
43
+ """
44
+ Given two states and a fitness function, returns the transition probability
45
+ when moving from the source state to the target state. Must move between
46
+ states with a Hamming distance of 1.
47
+
48
+ Returns 0 if Hamming distance > 1.
49
+ Returns None if Hamming distance = 0.
50
+
51
+
52
+ $\frac{\sum_{v_i = u_{i*}}{f(v_i)}}{\sum_{v_i}f(v_i)}$
53
+
54
+ Parameters
55
+ ----------
56
+ source: numpy.array, the starting state
57
+
58
+ target: numpy.array, what the source transitions to
59
+
60
+ fitness_function: func, The fitness function which maps a state to a
61
+ numpy.array where each entry represents the fitness of the given individual
62
+
63
+ selection_intensity: float, the selection intensity $\epsilon$ of the
64
+ system
65
+
66
+ Returns
67
+ ---------
68
+ Float: the transition pobability from source to target
69
+ """
70
+ different_indices = np.where(source != target)
71
+ if len(different_indices[0]) > 1:
72
+ return 0
73
+ if len(different_indices[0]) == 0:
74
+ return None
75
+ fitness = 1 + (selection_intensity * fitness_function(source, **kwargs))
76
+ denominator = fitness.sum() * len(source)
77
+ numerator = fitness[source == target[different_indices]].sum()
78
+ return numerator / denominator
79
+
80
+
81
def fermi_imitation_function(delta, choice_intensity=0.5, **kwargs):
    r"""
    Given the difference in fitness between the focal individual who changes
    action type and the target being copied, together with the choice
    intensity, returns
    $\phi(a_i, a_j) = \frac{1}{1 + \exp({\frac{f(a_{i}) - f(a_{j})}{\beta}})}$

    choice intensity is set to 0.5 by default, as is common according to:

    Xiaojian Ma, Ji Quan, Xianjia Wang (2021): Effect of reputation-based
    heterogeneous investment on cooperation in spatial public goods games

    Parameters
    -----------

    delta: float or sym.Symbol, the difference between the current fitness of
    an individual and the considered fitness.

    choice_intensity: float or sym.Symbol, a parameter which determines the
    effect the difference in fitness has on the transition probability. As
    choice_intensity goes to infinity, the probability of transitioning goes
    to $\frac{1}{2}$
    """
    exponent = choice_intensity * delta
    return 1 / (1 + sym.E**exponent)
106
+
107
+
108
def compute_fermi_transition_probability(
    source, target, fitness_function, choice_intensity, **kwargs
):
    r"""
    Given two states, a fitness function, and a choice intensity, returns
    the transition probability when moving from the source state to the target
    state. Must move between states with a Hamming distance of 1.

    Returns 0 if Hamming distance > 1.
    Returns None if Hamming distance = 0.

    The following equation is the subject of this function:

    $\sum_{a_j=b_{i^*}}^N\frac{1}{N(N-1)}\phi(f_i(a) - f(a_j))$

    where $\phi(f_i(a) - f(a_j)) = \frac{1}{1 + \exp({\frac{f(a_{i}) - f(a_{j})
    }{\beta}})}$

    Parameters
    ----------
    source: numpy.array, the starting state

    target: numpy.array, what the source transitions to

    fitness_function: func, The fitness function which maps a state to a
    numpy.array

    choice_intensity: float or sympy.Symbol: the choice intensity of the
    function. The lower the value, the higher the probability that a player
    will choose the higher fitness strategy in $\phi$

    Returns
    ---------
    Float: the transition probability from source to target"""

    changed = np.where(source != target)
    if len(changed[0]) > 1:
        return 0
    if len(changed[0]) == 0:
        return None

    fitness = fitness_function(source, **kwargs)

    # Indices of individuals already playing the incoming action: each is a
    # potential role model for the focal (changing) individual.
    role_models = np.where(source == target[changed])[0]
    imitation_terms = [
        fermi_imitation_function(
            delta=fitness[changed] - fitness[role_models],
            choice_intensity=choice_intensity,
            **kwargs,
        )
    ]

    # 1/(N(N-1)): uniform choice of focal individual and role model.
    pair_probability = 1 / (len(source) * (len(source) - 1))
    return (pair_probability * np.array(imitation_terms)).sum()
161
+
162
+
163
def compute_imitation_introspection_transition_probability(
    source, target, fitness_function, choice_intensity, selection_intensity, **kwargs
):
    r"""
    Given two states, a fitness function, and a choice intensity, returns
    the transition probability when moving from the source state to the target
    state in introspective imitation dynamics. Must move between states with a
    Hamming distance of 1.

    Returns 0 if Hamming distance > 1.
    Returns None if Hamming distance = 0.

    The following equation is the subject of this function:

    $\frac{1}{N}\frac{\sum_{a_{j} = b_{I(\textbf{a}, \textbf{b})}}f_j(\textbf{a})}{\sum_{k}f_k(\textbf{a})}\phi(\Delta(f_{I(\textbf{a,b})}))$

    Parameters
    ----------
    source: numpy.array, the starting state

    target: numpy.array, what the source transitions to

    fitness_function: func, The fitness function which maps a state to a
    numpy.array

    choice_intensity: float or sympy.Symbol: the choice intensity of the
    function. The lower the value, the higher the probability that a player
    will choose the higher fitness strategy in $\phi$

    selection_intensity: float or sympy.Symbol: the selection intensity
    $\epsilon$ of the system

    Returns
    ---------
    Float: the transition probability from source to target"""

    changed = np.where(source != target)
    if len(changed[0]) > 1:
        return 0
    if len(changed[0]) == 0:
        return None

    current_fitness = fitness_function(source, **kwargs)
    focal_fitness_now = current_fitness[changed][0]
    focal_fitness_next = fitness_function(target, **kwargs)[changed][0]

    # Fitness-proportional choice of a role model among the individuals
    # already playing the incoming action.
    weighted_fitness = 1 + selection_intensity * current_fitness
    role_model_weight = weighted_fitness[source == target[changed]].sum()
    selection_probability = role_model_weight / (
        weighted_fitness.sum() * len(source)
    )

    return selection_probability * fermi_imitation_function(
        delta=focal_fitness_now - focal_fitness_next,
        choice_intensity=choice_intensity,
    )
219
+
220
+
221
def compute_introspection_transition_probability(
    source, target, fitness_function, choice_intensity, number_of_strategies, **kwargs
):
    r"""
    Given two states, a fitness function, and a choice intensity, returns
    the transition probability when moving from the source state to the target
    state under introspective dynamics. Must move between states with
    Hamming distance of 1.

    Returns 0 if Hamming distance > 1.
    Returns None if Hamming distance = 0.

    The following equation is the subject of this function:
    $\frac{1}{N(m_j - 1)}\phi(f_i(a) - f_i(b))$

    Parameters
    ----------
    source: numpy.array, the starting state

    target: numpy.array, what the source transitions to

    fitness_function: func, The fitness function which maps a state to a
    numpy.array

    choice_intensity: float or sympy.Symbol: the choice intensity of the
    function. The lower the value, the higher the probability that a player
    will choose the higher fitness strategy in $\phi$

    number_of_strategies: the number of strategies available to each player in
    the population. What we call "k" in the get_state_space function

    Returns
    ---------
    Float: the transition probability from source to target"""

    changed = np.where(source != target)
    if len(changed[0]) > 1:
        return 0
    if len(changed[0]) == 0:
        return None

    focal_fitness_now = fitness_function(source, **kwargs)[changed][0]
    focal_fitness_next = fitness_function(target, **kwargs)[changed][0]

    # Uniform choice of one of N players and one of their (m - 1)
    # alternative strategies.
    pick_probability = 1 / (len(source) * (number_of_strategies - 1))

    return pick_probability * fermi_imitation_function(
        delta=focal_fitness_now - focal_fitness_next,
        choice_intensity=choice_intensity,
    )
273
+
274
+
275
def compute_aspiration_transition_probability(
    source, target, fitness_function, choice_intensity, aspiration_vector, **kwargs
):
    r"""
    Given two states, a fitness function, and a choice intensity, returns
    the transition probability when moving from the source state to the target
    state under aspiration dynamics. This dynamic takes the aspiration of a
    given player and they will change action type with a probability
    proportional to the difference between their current payoff, and their
    aspired payoff.

    Returns 0 if Hamming distance > 1.
    Returns None if Hamming distance = 0.

    Parameters
    ----------
    source: numpy.array, the starting state

    target: numpy.array, what the source transitions to

    fitness_function: func, The fitness function which maps a state to a
    numpy.array

    choice_intensity: float or sympy.Symbol: the choice intensity of the
    function. The lower the value, the higher the probability that a player
    will choose the higher fitness strategy in $\phi$

    aspiration_vector: numpy.array: the aspiration of each player in a state.

    Returns
    ---------
    float: the transition probability from source to target

    Raises
    ------
    ValueError: if either state contains more than 2 action types.
    """

    if len(np.unique(source)) > 2:
        raise ValueError("Aspiration Dynamics only supports 2 action types")
    if len(np.unique(target)) > 2:
        raise ValueError("Aspiration Dynamics only supports 2 action types")

    changed = np.where(source != target)
    if len(changed[0]) > 1:
        return 0
    if len(changed[0]) == 0:
        return None

    focal_fitness = fitness_function(source, **kwargs)[changed][0]
    focal_aspiration = aspiration_vector[changed][0]

    # One of the N players is selected uniformly at random to reassess
    # their action.
    pick_probability = 1 / len(source)

    return pick_probability * fermi_imitation_function(
        delta=focal_fitness - focal_aspiration,
        choice_intensity=choice_intensity,
    )
330
+
331
+
332
def apply_mutation_probability(
    source, target, individual_to_action_mutation_probability, transition_probability
):
    """
    Given two states and the probability of transitioning between them without
    mutation, returns the probability of transitioning between them under
    mutation.

    Parameters
    -----------
    source: numpy.array, the starting state

    target: numpy.array, the state which the source transitions to

    individual_to_action_mutation_probability: numpy.array or None: the probability of each player
    mutating to each action type. Row 0 corresponds to player 0, column 0
    corresponds to action type 0. Action types must be written in the form of
    0,1,2,etc.

    transition_probability: float - the probability of transitioning between
    source and target without mutation.

    Returns
    --------
    float: the transition probability between source and target under mutation
    """

    changed_positions = np.where(source != target)[0]

    if len(changed_positions) != 1:
        # Hamming distance 0 or > 1: a single-individual mutation cannot
        # explain the move, so the base probability is returned unchanged.
        return transition_probability

    mutating_player = changed_positions[0]
    mutated_type = target[mutating_player]

    # Probability the mutating player mutates at all (to any action).
    total_mutation_rate = individual_to_action_mutation_probability[
        mutating_player, :
    ].sum()
    # Probability the mutating player is picked (1/N) and mutates to
    # exactly the new action type.
    mutation_term = (
        individual_to_action_mutation_probability[mutating_player, mutated_type]
        / len(source)
    )

    return transition_probability * (1 - total_mutation_rate) + mutation_term
378
+
379
+
380
def generate_transition_matrix(
    state_space,
    fitness_function,
    compute_transition_probability,
    individual_to_action_mutation_probability=None,
    **kwargs,
):
    """
    Given a state space and a fitness function, returns the transition matrix
    for the heterogeneous Moran process.

    Parameters
    ----------
    state_space: numpy.array, the state space for the transition matrix

    fitness_function: function, should return a size N numpy.array when passed
    a state

    compute_transition_probability: function, takes a source state, a target
    state, and a fitness function, and returns the probability of transitioning
    from the source state to the target state

    individual_to_action_mutation_probability: numpy.array or None: the probability of each player
    mutating to each action type. Row 0 corresponds to player 0, column 0
    corresponds to action type 0. Action types must be written in the form of
    0,1,2,etc. If None, this will assume a vector of 0 probabilities.

    Returns
    ----------
    numpy.array: the transition matrix
    """

    def _entry(source, target):
        # One off-diagonal entry: base dynamic probability, then mutation.
        return apply_mutation_probability(
            source=source,
            target=target,
            individual_to_action_mutation_probability=individual_to_action_mutation_probability,
            transition_probability=compute_transition_probability(
                source=source,
                target=target,
                fitness_function=fitness_function,
                **kwargs,
            ),
        )

    number_of_states = len(state_space)
    transition_matrix = np.zeros(shape=(number_of_states, number_of_states))
    if individual_to_action_mutation_probability is None:
        # No mutation: a zero matrix with one row per individual and one
        # column per action type. (The original code sized this
        # (num_states, num_states), which only worked because every entry
        # was zero.)
        number_of_individuals = state_space.shape[1]
        number_of_actions = int(state_space.max()) + 1
        individual_to_action_mutation_probability = np.zeros(
            shape=(number_of_individuals, number_of_actions)
        )
    for row_index, source in enumerate(state_space):
        for col_index, target in enumerate(state_space):
            if row_index == col_index:
                continue
            try:
                transition_matrix[row_index, col_index] = _entry(source, target)
            except TypeError:
                # A symbolic probability cannot be stored in a float
                # array; switch to an object-dtype matrix and retry.
                transition_matrix = transition_matrix.astype(object)
                transition_matrix[row_index, col_index] = _entry(source, target)

    # Rows of a stochastic matrix sum to 1: the diagonal takes the slack.
    np.fill_diagonal(transition_matrix, 1 - transition_matrix.sum(axis=1))
    return transition_matrix
456
+
457
+
458
def get_absorbing_state_index(state_space):
    """Given a state space, returns the indexes of the absorbing states
    (i.e, states with only one value repeated).

    Parameters
    -------------
    state_space: numpy.array, an array of states

    Returns
    --------------
    numpy.array of index values for the absorbing states, or None when no
    state is absorbing"""

    # A state is absorbing when every entry matches its first entry.
    uniform_rows = np.all(state_space == state_space[:, [0]], axis=1)
    absorbing_index = np.flatnonzero(uniform_rows)
    if len(absorbing_index) == 0:
        return None
    return absorbing_index
473
+
474
+
475
def get_absorbing_states(state_space):
    """Given a state space, returns the absorbing states

    Parameters
    -----------
    state_space: numpy.array, a state space

    Returns
    ---------
    numpy.array, a list of absorbing states, in order, or None when no
    state is absorbing"""

    absorbing_indices = get_absorbing_state_index(state_space=state_space)
    if absorbing_indices is None:
        return None
    return np.array([state_space[i] for i in absorbing_indices])
493
+
494
+
495
def get_absorption_probabilities(
    transition_matrix, state_space, exponent_coefficient=50
):
    """Given a transition matrix and a corresponding state space,
    generate the absorption probabilities. This does not yet support a
    symbolic transition matrix input.

    Parameters
    -------------
    state_space: numpy.array, a state space

    transition matrix: numpy.array, a matrix of transition probabilities corresponding to the state space

    Returns
    -------------
    Dictionary of values: tuple([starting state]): [[absorbing state 1, absorption probability 1], [absorbing state 2, absorption probability 2]]
    """

    absorbing_indices = get_absorbing_state_index(state_space=state_space)

    # Raising the matrix to a high power approximates the long-run
    # behaviour of the chain.
    long_run_matrix = np.linalg.matrix_power(
        transition_matrix, exponent_coefficient
    )

    # TODO this method of getting absorption probabilities will change, but we need to set up benchmarks first

    absorbing_columns = np.array(
        [long_run_matrix[:, index] for index in absorbing_indices]
    )

    # Interleave each absorbing-state index with its absorption
    # probability, one row per starting state.
    interleaved_values = np.array(
        [
            np.ravel(
                np.column_stack((absorbing_indices, absorbing_columns[:, row]))
            )
            for row, _ in enumerate(absorbing_columns.transpose())
        ]
    )

    return {
        state_index: interleaved_values[state_index]
        for state_index, _ in enumerate(state_space)
    }
538
+
539
+
540
def extract_Q(transition_matrix):
    """
    For a transition matrix, compute the corresponding matrix Q: the
    square sub-matrix of transitions between transient states.

    Parameters
    ----------
    transition_matrix: numpy.array, the transition matrix

    Returns
    -------
    np.array, the matrix Q
    """
    # Transient states are those without a 1 on the diagonal.
    transient = np.where(transition_matrix.diagonal() != 1)[0]
    return transition_matrix[np.ix_(transient, transient)]
557
+
558
+
559
def extract_R_numerical(transition_matrix):
    """
    For a numeric transition matrix, compute the corresponding matrix R:
    the transition probabilities from each transient state to each
    absorbing state.

    Parameters
    ----------
    transition_matrix: numpy.array, the transition matrix

    Returns
    ----------
    np.array, the matrix R
    """

    # TODO merge with symbolic version and Q as function: obtain canonical form

    # Absorbing states have a diagonal entry of (approximately) 1.
    is_absorbing = np.isclose(np.diag(transition_matrix), 1.0)
    return transition_matrix[np.ix_(~is_absorbing, is_absorbing)]
579
+
580
+
581
def extract_R_symbolic(transition_matrix):
    """
    For a (possibly symbolic) transition matrix, compute the corresponding
    matrix R: the transition probabilities from each transient state to
    each absorbing state. A state counts as absorbing when its diagonal
    entry simplifies to exactly 1.

    Parameters
    ----------
    transition_matrix: numpy.array (possibly object dtype holding sympy
    expressions), the transition matrix

    Returns
    -------
    np.array, the matrix R
    """
    n = transition_matrix.shape[0]

    # sym.simplify handles symbolic entries; `== 0` matches sympy Integer,
    # Float and plain Python zeros alike (the original membership test
    # against (0, float(0)) was redundant since 0 == 0.0).
    absorbing_states = np.array(
        [sym.simplify(transition_matrix[i, i] - 1) == 0 for i in range(n)],
        dtype=bool,
    )

    non_absorbing_states = ~absorbing_states

    return transition_matrix[np.ix_(non_absorbing_states, absorbing_states)]
595
+
596
+
597
def compute_absorption_matrix(transition_matrix):
    """
    Given a transition matrix, NOT allowing for symbolic values,
    returns the absorption matrix.

    Parameters:
    ------------

    transition_matrix: numpy.array: a transition matrix with no symbolic values

    Returns:
    -----------

    numpy.array: the probability of transitioning from
    each transitive state (row) to each absorbing state(column).
    """

    Q = extract_Q(transition_matrix=transition_matrix)
    R = extract_R_numerical(transition_matrix=transition_matrix)

    # B = (I - Q)^{-1} R, solved as a linear system rather than via an
    # explicit inverse.
    return scipy.linalg.solve(np.eye(Q.shape[0]) - Q, R)
625
+
626
+
627
def calculate_absorption_matrix(transition_matrix):
    """
    Given a transition matrix, allowing for symbolic values,
    returns the absorption matrix.

    Parameters:
    ------------

    transition_matrix: numpy.array: a transition matrix allowing for symbolic
    values, that has at least 1 symbolic value.

    Returns:
    -----------

    sympy.Matrix: the probability of transitioning from
    each transitive state (row) to each absorbing state(column).
    """

    Q_symbolic = sym.Matrix(extract_Q(transition_matrix=transition_matrix))
    R_symbolic = sym.Matrix(extract_R_symbolic(transition_matrix=transition_matrix))

    # B = (I - Q)^{-1} R, computed exactly with sympy.
    fundamental = (sym.eye(Q_symbolic.shape[0]) - Q_symbolic) ** -1

    return sym.Matrix(fundamental * R_symbolic)
663
+
664
def compute_steady_state(transition_matrix, tolerance=10**-6, initial_dist=None):
    """
    Returns the steady state vector of a given transition matrix that is
    entirely numeric, by repeated application of the transition matrix
    (power iteration) until the distribution stops changing.

    Parameters
    ----------
    transition_matrix - numpy.array, a transition matrix.

    tolerance - float. The maximum change when taking next_pi = pi @
    transition_matrix

    initial_dist - numpy.array: the starting state distribution.

    Returns
    ----------
    numpy.array - steady state of transition_matrix."""
    number_of_states, _ = transition_matrix.shape
    if initial_dist is None:
        # Default to the uniform distribution over states.
        distribution = np.ones(number_of_states) / number_of_states
    else:
        distribution = initial_dist
    while True:
        updated = distribution @ transition_matrix
        if np.max(np.abs(updated - distribution)) <= tolerance:
            break
        distribution = updated
    return distribution
689
+
690
+
691
def calculate_steady_state(transition_matrix):
    """
    Returns the steady state vector of a given transition matrix. The steady
    state is calculated as the left eigenvector of the transition matrix of a
    Markov chain. This is achieved by noticing that solving $xA = x$ is
    equivalent to $(A^T - I)x^T = 0$. Thus, we find the right-nullspace of
    $(A^T - I)$.

    Parameters
    ----------
    transition_matrix - numpy.array or sympy.Matrix, a transition matrix.

    Returns
    ----------
    numpy.array - steady state of transition_matrix. For the symbolic case,
    this will always be simplified.

    Raises
    ----------
    ValueError - if the nullspace is empty, i.e. no eigenvector with
    eigenvalue 1 exists.
    """
    transition_matrix = sym.Matrix(transition_matrix)

    nullspace = (transition_matrix.T - sym.eye(transition_matrix.rows)).nullspace()

    # Only an empty nullspace can fail here, so catch the specific
    # IndexError (not a blanket Exception) and chain the original error
    # so the traceback keeps its cause.
    try:
        one_eigenvector = nullspace[0]
    except IndexError as error:
        raise ValueError("No eigenvector found") from error

    # Normalise so the entries sum to 1, then flatten to a 1-D numpy array.
    return np.array(sym.simplify(one_eigenvector / sum(one_eigenvector)).T)[0]
718
+
719
+
720
def get_neighbourhood_states(state, number_of_strategies):
    """
    Given a state, returns a numpy.array of neighbouring states. That is, the
    set of states which differ at exactly 1 position from the current state.

    Parameters:
    ------------

    state: numpy.array - the state to be considered

    number_of_strategies: int - the number of actions available to each player

    returns:
    ---------
    numpy.array: the set of states in the neighbourhood of the passed state"""

    action_set = np.arange(number_of_strategies)

    neighbours = []

    # For each player, generate one neighbour per alternative action.
    for player, player_type in enumerate(state):
        for action in action_set:
            if player_type != action:
                # Copy so the caller's state array is never mutated.
                adjacent_state = state.copy()
                adjacent_state[player] = action
                neighbours.append(adjacent_state)

    return np.array(neighbours)
751
+
752
+
753
def simulate_markov_chain(
    initial_state,
    number_of_strategies,
    fitness_function,
    compute_transition_probability,
    seed,
    individual_to_action_mutation_probability=None,
    warmup=0,
    iterations=10000,
    **kwargs,
):
    r"""
    Given an initial state, simulates a markov chain under a fitness function
    and a population dynamic. Moves between neighbouring states and returns
    the states in the order they were visited in, and the number of times
    each state was visited.

    Parameters
    -----------
    initial_state: numpy.array - the state that our markov chain is in at $t=0$

    number_of_strategies: int - the number of actions available to each player

    fitness_function: func - takes a state and returns the fitness of each
    individual

    compute_transition_probability: func - takes two states and returns the
    probability of transitioning from one state to another.

    seed: int - the seed for the random process.

    individual_to_action_mutation_probability: numpy.array - an N x (number of
    strategies) array where $\mu_{ik}$ gives the probability of player $i$
    mutating to type $k$. By default, set to None, which gives an array of all
    zeros.

    warmup: int - the number of iterations before we begin tracking the
    distribution of steps

    iterations: int - the number of iterations in the simulation. By default,
    this is set to 10000

    **kwargs - extra keyword arguments forwarded to
    compute_transition_probability.

    returns
    --------
    tuple - (the states in the order visited, a collections.Counter of how
    often each state was visited)"""

    # Legacy global seeding; every np.random call below draws from this
    # seeded global state, making the simulation reproducible per seed.
    np.random.seed(seed)

    # Default: no mutation at all (an all-zeros N x k probability array).
    if individual_to_action_mutation_probability is None:
        individual_to_action_mutation_probability = np.zeros(
            shape=(len(initial_state), number_of_strategies)
        )

    states_over_time = []
    # The initial state is only recorded when there is no warmup period.
    if warmup == 0:
        states_over_time.append(tuple(initial_state))

    current_state = initial_state

    # Memoise each visited state's neighbourhood and transition probabilities
    # so they are computed at most once per distinct state.
    state_to_neighourhood_and_transition_probabilities = {}

    for time_step in range(iterations - 1):
        current_state_key = tuple(current_state.tolist())
        # EAFP: reuse the cached neighbourhood/probabilities when available.
        try:
            neighbourhood, transition_probabilities = (
                state_to_neighourhood_and_transition_probabilities[current_state_key]
            )

        except KeyError:
            neighbourhood = list(
                get_neighbourhood_states(
                    state=current_state, number_of_strategies=number_of_strategies
                )
            )

            kwargs_with_strategies = {
                **kwargs,
                "number_of_strategies": number_of_strategies,
            }

            # Probability of moving to each neighbour, adjusted for the
            # per-individual mutation probabilities.
            transition_probabilities = np.array(
                [
                    apply_mutation_probability(
                        source=current_state,
                        target=next_state,
                        transition_probability=compute_transition_probability(
                            source=current_state,
                            target=next_state,
                            fitness_function=fitness_function,
                            **kwargs_with_strategies,
                        ),
                        individual_to_action_mutation_probability=individual_to_action_mutation_probability,
                    )
                    for next_state in neighbourhood
                ],
                dtype=float,
            )

            # The chain may also stay put: append the current state itself
            # with the leftover probability mass so the distribution sums to 1.
            neighbourhood.append(current_state)
            transition_probabilities = np.append(
                transition_probabilities, 1 - transition_probabilities.sum()
            )

            state_to_neighourhood_and_transition_probabilities[current_state_key] = (
                neighbourhood,
                transition_probabilities,
            )

        next_state_index = np.random.choice(
            len(neighbourhood), p=transition_probabilities
        )

        current_state = neighbourhood[next_state_index]

        # Only track visited states once the warmup period has elapsed.
        if time_step >= warmup - 1:
            states_over_time.append(tuple(current_state.tolist()))

    state_distribution = collections.Counter(states_over_time)

    return (states_over_time, state_distribution)