stopro 0.3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stopro-0.3.3/PKG-INFO +92 -0
- stopro-0.3.3/README.md +69 -0
- stopro-0.3.3/pyproject.toml +37 -0
- stopro-0.3.3/src/stopro/__init__.py +34 -0
- stopro-0.3.3/src/stopro/_utils.py +171 -0
- stopro-0.3.3/src/stopro/colored_geometric_brownian_motion.py +81 -0
- stopro-0.3.3/src/stopro/colored_replicator.py +110 -0
- stopro-0.3.3/src/stopro/competitive_lotka_volterra.py +154 -0
- stopro-0.3.3/src/stopro/exponential_ornstein_uhlenbeck.py +83 -0
- stopro-0.3.3/src/stopro/geometric_brownian_motion.py +81 -0
- stopro-0.3.3/src/stopro/gillespie_replicator.py +89 -0
- stopro-0.3.3/src/stopro/integrated_ornstein_uhlenbeck.py +76 -0
- stopro-0.3.3/src/stopro/kimura_replicator.py +116 -0
- stopro-0.3.3/src/stopro/moran.py +153 -0
- stopro-0.3.3/src/stopro/ornstein_uhlenbeck.py +183 -0
- stopro-0.3.3/src/stopro/white_replicator.py +45 -0
- stopro-0.3.3/src/stopro/wiener.py +85 -0
stopro-0.3.3/PKG-INFO
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: stopro
|
|
3
|
+
Version: 0.3.3
|
|
4
|
+
Summary: Stochastic processes in Python
|
|
5
|
+
Keywords: stochastic-processes,sde,wiener,ornstein-uhlenbeck
|
|
6
|
+
Author: Dirk Brockmann
|
|
7
|
+
Author-email: Dirk Brockmann <dirk.brockmann@tu-dresden.de>
|
|
8
|
+
License: MIT
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Dist: numpy>=1.22
|
|
13
|
+
Requires-Dist: scipy>=1.9
|
|
14
|
+
Requires-Dist: jupyter ; extra == 'examples'
|
|
15
|
+
Requires-Dist: matplotlib ; extra == 'examples'
|
|
16
|
+
Requires-Dist: palettable ; extra == 'examples'
|
|
17
|
+
Maintainer: Dirk Brockmann
|
|
18
|
+
Maintainer-email: Dirk Brockmann <dirk.brockmann@tu-dresden.de>
|
|
19
|
+
Requires-Python: >=3.9
|
|
20
|
+
Project-URL: Repository, https://github.com/dirkbrockmann/stopro
|
|
21
|
+
Provides-Extra: examples
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
|
|
24
|
+
# stopro — Elementary Stochastic Processes
|
|
25
|
+
|
|
26
|
+
**stopro** is a small Python library for generating and simulating common
|
|
27
|
+
(multivariate) stochastic processes.
|
|
28
|
+
|
|
29
|
+
Currently included processes:
|
|
30
|
+
|
|
31
|
+
1. Wiener process
|
|
32
|
+
2. Ornstein–Uhlenbeck process
|
|
33
|
+
3. Integrated Ornstein–Uhlenbeck process
|
|
34
|
+
4. Exponential Ornstein–Uhlenbeck process
|
|
35
|
+
5. Geometric Brownian Motion
|
|
36
|
+
6. Colored Geometric Brownian Motion
|
|
37
|
+
7. Gillespie Replicator
|
|
38
|
+
8. Kimura Replicator
|
|
39
|
+
9. White Replicator
|
|
40
|
+
10. Colored Replicator
|
|
41
|
+
11. Multispecies Moran process
|
|
42
|
+
(discrete particle kinetics & diffusion approximation)
|
|
43
|
+
12. Competitive Lotka–Volterra process
|
|
44
|
+
(discrete particle kinetics & diffusion approximation)
|
|
45
|
+
|
|
46
|
+
Examples and documentation are provided as Jupyter notebooks.
|
|
47
|
+
|
|
48
|
+
---
|
|
49
|
+
|
|
50
|
+
## Quick start (recommended)
|
|
51
|
+
|
|
52
|
+
This project uses **uv**, a fast Python package manager and virtual environment
|
|
53
|
+
tool. You need to install uv on your system first. When that's done:
|
|
54
|
+
|
|
55
|
+
```bash
|
|
56
|
+
git clone https://github.com/dirkbrockmann/stopro.git
|
|
57
|
+
cd stopro
|
|
58
|
+
make notebook
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
---
|
|
62
|
+
|
|
63
|
+
## Using stopro in your own code
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
import stopro
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
---
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
## Installation via pip
|
|
73
|
+
|
|
74
|
+
Download and install the latest source:
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
pip install git+https://github.com/dirkbrockmann/stopro.git
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
Clone and work with code locally:
|
|
81
|
+
|
|
82
|
+
```bash
|
|
83
|
+
git clone https://github.com/dirkbrockmann/stopro.git
|
|
84
|
+
cd stopro
|
|
85
|
+
pip install -e .
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
---
|
|
89
|
+
|
|
90
|
+
## License
|
|
91
|
+
|
|
92
|
+
MIT
|
stopro-0.3.3/README.md
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# stopro — Elementary Stochastic Processes
|
|
2
|
+
|
|
3
|
+
**stopro** is a small Python library for generating and simulating common
|
|
4
|
+
(multivariate) stochastic processes.
|
|
5
|
+
|
|
6
|
+
Currently included processes:
|
|
7
|
+
|
|
8
|
+
1. Wiener process
|
|
9
|
+
2. Ornstein–Uhlenbeck process
|
|
10
|
+
3. Integrated Ornstein–Uhlenbeck process
|
|
11
|
+
4. Exponential Ornstein–Uhlenbeck process
|
|
12
|
+
5. Geometric Brownian Motion
|
|
13
|
+
6. Colored Geometric Brownian Motion
|
|
14
|
+
7. Gillespie Replicator
|
|
15
|
+
8. Kimura Replicator
|
|
16
|
+
9. White Replicator
|
|
17
|
+
10. Colored Replicator
|
|
18
|
+
11. Multispecies Moran process
|
|
19
|
+
(discrete particle kinetics & diffusion approximation)
|
|
20
|
+
12. Competitive Lotka–Volterra process
|
|
21
|
+
(discrete particle kinetics & diffusion approximation)
|
|
22
|
+
|
|
23
|
+
Examples and documentation are provided as Jupyter notebooks.
|
|
24
|
+
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## Quick start (recommended)
|
|
28
|
+
|
|
29
|
+
This project uses **uv**, a fast Python package manager and virtual environment
|
|
30
|
+
tool. You need to install uv on your system first. When that's done:
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
git clone https://github.com/dirkbrockmann/stopro.git
|
|
34
|
+
cd stopro
|
|
35
|
+
make notebook
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
---
|
|
39
|
+
|
|
40
|
+
## Using stopro in your own code
|
|
41
|
+
|
|
42
|
+
```python
|
|
43
|
+
import stopro
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
## Installation via pip
|
|
50
|
+
|
|
51
|
+
Download and install the latest source:
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
pip install git+https://github.com/dirkbrockmann/stopro.git
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
Clone and work with code locally:
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
git clone https://github.com/dirkbrockmann/stopro.git
|
|
61
|
+
cd stopro
|
|
62
|
+
pip install -e .
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## License
|
|
68
|
+
|
|
69
|
+
MIT
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["uv_build>=0.9.26,<0.10.0"]
|
|
3
|
+
build-backend = "uv_build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "stopro"
|
|
7
|
+
version = "0.3.3"
|
|
8
|
+
description = "Stochastic processes in Python"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.9"
|
|
11
|
+
authors = [{ name = "Dirk Brockmann", email = "dirk.brockmann@tu-dresden.de" }]
|
|
12
|
+
maintainers = [{ name = "Dirk Brockmann", email = "dirk.brockmann@tu-dresden.de" }]
|
|
13
|
+
license = { text = "MIT" }
|
|
14
|
+
|
|
15
|
+
dependencies = [
|
|
16
|
+
"numpy>=1.22",
|
|
17
|
+
"scipy>=1.9",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
keywords = ["stochastic-processes", "sde", "wiener", "ornstein-uhlenbeck"]
|
|
21
|
+
classifiers = [
|
|
22
|
+
"Programming Language :: Python :: 3",
|
|
23
|
+
"License :: OSI Approved :: MIT License",
|
|
24
|
+
"Operating System :: OS Independent",
|
|
25
|
+
]
|
|
26
|
+
|
|
27
|
+
[project.urls]
|
|
28
|
+
Repository = "https://github.com/dirkbrockmann/stopro"
|
|
29
|
+
|
|
30
|
+
[project.optional-dependencies]
|
|
31
|
+
examples = ["jupyter", "matplotlib", "palettable"]
|
|
32
|
+
|
|
33
|
+
[dependency-groups]
|
|
34
|
+
dev = ["pytest>=7"]
|
|
35
|
+
|
|
36
|
+
[tool.pytest.ini_options]
|
|
37
|
+
testpaths = ["tests"]
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Public API for the stopro package.

Re-exports the stochastic-process generators from the individual
submodules so users can call e.g. ``stopro.wiener(...)`` directly.
"""

from importlib.metadata import version

# Single source of truth for the version: read it from the installed
# package metadata instead of duplicating the number here.
__version__ = version("stopro")

__all__ = [
    "wiener",
    "ornstein_uhlenbeck",
    "kimura_replicator",
    "geometric_brownian_motion",
    "exponential_ornstein_uhlenbeck",
    "integrated_ornstein_uhlenbeck",
    "colored_geometric_brownian_motion",
    "gillespie_replicator",
    "white_replicator",
    "colored_stochastic_replicator",
    "colored_replicator",
    "moran",
    "competitive_lotka_volterra",
    "__version__",
]

from .wiener import wiener
from .ornstein_uhlenbeck import ornstein_uhlenbeck
from .kimura_replicator import kimura_replicator
from .geometric_brownian_motion import geometric_brownian_motion
from .exponential_ornstein_uhlenbeck import exponential_ornstein_uhlenbeck
from .integrated_ornstein_uhlenbeck import integrated_ornstein_uhlenbeck
from .colored_geometric_brownian_motion import colored_geometric_brownian_motion
from .gillespie_replicator import gillespie_replicator
from .white_replicator import white_replicator
# NOTE(review): "colored_stochastic_replicator" appears to be an alternate
# (presumably legacy) name for colored_replicator — both resolve to the
# same function below.
from .colored_replicator import colored_replicator as colored_stochastic_replicator
from .colored_replicator import colored_replicator
from .moran import moran
from .competitive_lotka_volterra import competitive_lotka_volterra
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
# Build a uniform time grid on [0, T] from either dt or steps (exactly one must be given).
|
|
4
|
+
# Returns (dt, steps, t) with t including both endpoints 0 and T.
|
|
5
|
+
def _time_grid(T, dt=None, steps=None):
|
|
6
|
+
"""
|
|
7
|
+
Build a uniform grid on [0, T].
|
|
8
|
+
|
|
9
|
+
Provide exactly one of dt or steps.
|
|
10
|
+
- If steps is provided: dt = T/steps.
|
|
11
|
+
- If dt is provided: choose integer steps close to T/dt, then use dt = T/steps
|
|
12
|
+
so the grid ends exactly at T.
|
|
13
|
+
"""
|
|
14
|
+
if (dt is None) == (steps is None):
|
|
15
|
+
raise ValueError("Provide exactly one of dt or steps (not both, not neither).")
|
|
16
|
+
|
|
17
|
+
T = float(T)
|
|
18
|
+
if T <= 0:
|
|
19
|
+
raise ValueError("T must be > 0.")
|
|
20
|
+
|
|
21
|
+
if steps is not None:
|
|
22
|
+
steps = int(steps)
|
|
23
|
+
if steps <= 0:
|
|
24
|
+
raise ValueError("steps must be a positive integer.")
|
|
25
|
+
dt = T / steps
|
|
26
|
+
t = np.linspace(0.0, T, steps + 1)
|
|
27
|
+
return dt, steps, t
|
|
28
|
+
|
|
29
|
+
dt = float(dt)
|
|
30
|
+
if dt <= 0:
|
|
31
|
+
raise ValueError("dt must be > 0.")
|
|
32
|
+
|
|
33
|
+
steps = int(np.round(T / dt))
|
|
34
|
+
steps = max(1, steps)
|
|
35
|
+
|
|
36
|
+
dt = T / steps
|
|
37
|
+
t = np.linspace(0.0, T, steps + 1)
|
|
38
|
+
return dt, steps, t
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# Robustly factor a symmetric PSD matrix C into L such that C ≈ L @ L.T.
|
|
42
|
+
# Uses Cholesky when possible, otherwise falls back to eigen-decomposition (clipping small negatives).
|
|
43
|
+
def _psd_factor(C, *, jitter=0.0, tol=1e-12):
|
|
44
|
+
|
|
45
|
+
C = np.asarray(C, dtype=float)
|
|
46
|
+
C = 0.5 * (C + C.T)
|
|
47
|
+
|
|
48
|
+
if jitter and jitter > 0:
|
|
49
|
+
Cj = C + jitter * np.eye(C.shape[0])
|
|
50
|
+
else:
|
|
51
|
+
Cj = C
|
|
52
|
+
|
|
53
|
+
try:
|
|
54
|
+
return np.linalg.cholesky(Cj)
|
|
55
|
+
except np.linalg.LinAlgError:
|
|
56
|
+
w, V = np.linalg.eigh(C)
|
|
57
|
+
w = np.where(w > tol, w, 0.0)
|
|
58
|
+
return V @ np.diag(np.sqrt(w))
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# Normalize "noise specification" into a mixing matrix S with covariance C = S @ S.T.
|
|
62
|
+
# Accepts either covariance (must be symmetric PSD) or a mixing_matrix; otherwise defaults to identity.
|
|
63
|
+
def _mixing(*, N=None, covariance=None, mixing_matrix=None, jitter=1e-12, psd_tol=1e-12, sym_tol=1e-12):
|
|
64
|
+
if covariance is not None and mixing_matrix is not None:
|
|
65
|
+
raise ValueError("Provide only one of covariance or mixing_matrix (not both).")
|
|
66
|
+
|
|
67
|
+
if mixing_matrix is not None:
|
|
68
|
+
S = np.asarray(mixing_matrix, dtype=float)
|
|
69
|
+
if S.ndim != 2:
|
|
70
|
+
raise ValueError("mixing_matrix must be a 2D array.")
|
|
71
|
+
N_res, M = S.shape
|
|
72
|
+
if N is not None and int(N) != N_res:
|
|
73
|
+
raise ValueError(f"N={N} is inconsistent with mixing_matrix.shape[0]={N_res}.")
|
|
74
|
+
C = S @ S.T
|
|
75
|
+
C = 0.5 * (C + C.T)
|
|
76
|
+
return S, C, N_res, M
|
|
77
|
+
|
|
78
|
+
if covariance is not None:
|
|
79
|
+
C = np.asarray(covariance, dtype=float)
|
|
80
|
+
if C.ndim != 2 or C.shape[0] != C.shape[1]:
|
|
81
|
+
raise ValueError("covariance must be a square (N,N) array.")
|
|
82
|
+
|
|
83
|
+
# Require symmetry (within tolerance) for user-provided covariance
|
|
84
|
+
max_asym = float(np.max(np.abs(C - C.T)))
|
|
85
|
+
if max_asym > sym_tol:
|
|
86
|
+
raise ValueError(
|
|
87
|
+
f"covariance must be symmetric; max |C - C.T| = {max_asym:g} exceeds sym_tol={sym_tol:g}."
|
|
88
|
+
)
|
|
89
|
+
|
|
90
|
+
C = 0.5 * (C + C.T)
|
|
91
|
+
|
|
92
|
+
N_res = C.shape[0]
|
|
93
|
+
if N is not None and int(N) != N_res:
|
|
94
|
+
raise ValueError(f"N={N} is inconsistent with covariance.shape[0]={N_res}.")
|
|
95
|
+
|
|
96
|
+
lam_min = float(np.min(np.linalg.eigvalsh(C)))
|
|
97
|
+
if lam_min < -psd_tol:
|
|
98
|
+
raise ValueError(
|
|
99
|
+
f"covariance must be positive semidefinite; min eigenvalue = {lam_min:g} < -psd_tol={psd_tol:g}."
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
S = _psd_factor(C, jitter=jitter, tol=psd_tol)
|
|
103
|
+
M = N_res
|
|
104
|
+
return S, C, N_res, M
|
|
105
|
+
# Neither covariance nor mixing_matrix: default to independent Wiener components
|
|
106
|
+
if N is None:
|
|
107
|
+
raise ValueError("N must be provided when neither covariance nor mixing_matrix is given.")
|
|
108
|
+
N_res = int(N)
|
|
109
|
+
if N_res <= 0:
|
|
110
|
+
raise ValueError("N must be a positive integer.")
|
|
111
|
+
S = np.eye(N_res, dtype=float)
|
|
112
|
+
C = np.eye(N_res, dtype=float)
|
|
113
|
+
M = N_res
|
|
114
|
+
return S, C, N_res, M
|
|
115
|
+
|
|
116
|
+
# Coerce a parameter into a length-N float vector (broadcast scalar; validate 1D shape).
|
|
117
|
+
def _as_vector(x, N, name):
|
|
118
|
+
arr = np.asarray(x, dtype=float)
|
|
119
|
+
if arr.ndim == 0:
|
|
120
|
+
return np.full(int(N), float(arr))
|
|
121
|
+
if arr.shape == (int(N),):
|
|
122
|
+
return arr
|
|
123
|
+
raise ValueError(f"{name} must be a scalar or an array of shape ({N},), got shape {arr.shape}.")
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
# Parse initial_condition into either ("stationary") or an explicit x0 vector of length N.
|
|
127
|
+
# Used by OU/Wiener-like processes to support a convenient stationary start.
|
|
128
|
+
def _parse_initial_condition(initial_condition, *, N):
|
|
129
|
+
if isinstance(initial_condition, str):
|
|
130
|
+
if initial_condition.lower() == "stationary":
|
|
131
|
+
return True, None
|
|
132
|
+
raise ValueError(f"unknown initial_condition '{initial_condition}'")
|
|
133
|
+
|
|
134
|
+
if initial_condition is None:
|
|
135
|
+
return False, np.zeros(int(N), dtype=float)
|
|
136
|
+
|
|
137
|
+
# scalar or vector
|
|
138
|
+
x0 = _as_vector(initial_condition, N, "initial_condition")
|
|
139
|
+
return False, x0
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
# Create an initial condition on the probability simplex (nonnegative entries summing to 1).
|
|
143
|
+
# Accepts None (uniform), scalar (broadcast then normalize), or a length-N vector (validate+normalize).
|
|
144
|
+
def _simplex_initial_condition(initial_condition, *, N, name="initial_condition"):
|
|
145
|
+
|
|
146
|
+
N = int(N)
|
|
147
|
+
if N <= 0:
|
|
148
|
+
raise ValueError("N must be a positive integer.")
|
|
149
|
+
|
|
150
|
+
if initial_condition is None:
|
|
151
|
+
return np.full(N, 1.0 / N, dtype=float)
|
|
152
|
+
|
|
153
|
+
x0 = np.asarray(initial_condition, dtype=float)
|
|
154
|
+
|
|
155
|
+
if x0.ndim == 0:
|
|
156
|
+
c = float(x0)
|
|
157
|
+
if c <= 0:
|
|
158
|
+
raise ValueError(f"{name} scalar must be > 0 to normalize, got {c}.")
|
|
159
|
+
x0 = np.full(N, c, dtype=float)
|
|
160
|
+
|
|
161
|
+
if x0.shape != (N,):
|
|
162
|
+
raise ValueError(f"{name} must be None, a scalar, or an array of shape ({N},), got shape {x0.shape}.")
|
|
163
|
+
|
|
164
|
+
if np.any(x0 < 0):
|
|
165
|
+
raise ValueError(f"{name} must be nonnegative.")
|
|
166
|
+
|
|
167
|
+
s = float(np.sum(x0))
|
|
168
|
+
if s <= 0:
|
|
169
|
+
raise ValueError(f"{name} must have positive sum to normalize.")
|
|
170
|
+
|
|
171
|
+
return x0 / s
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
from ._utils import _as_vector
|
|
4
|
+
from .integrated_ornstein_uhlenbeck import integrated_ornstein_uhlenbeck
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def colored_geometric_brownian_motion(
    T,
    dt=None,
    *,
    steps=None,
    gap=1,
    N=1,
    samples=1,
    mu=1.0,
    sigma=1.0,
    tau=1.0,
    initial_condition=None,
    covariance=None,
    mixing_matrix=None,
):
    """
    Simulate multivariate colored geometric Brownian motion (cGBM) on [0, T].

        dX_i(t) = mu_i X_i(t) dt + sigma_i X_i(t) Z_i(t) dt
        tau * dZ_i(t) = -Z_i(t) dt + dW_i(t)

    so that X_i(t) = X_i(0) * exp(mu_i t + sigma_i * ∫_0^t Z_i(s) ds).

    Exactly one of `dt` or `steps` must be given.  Noise correlation is set
    via `covariance` or `mixing_matrix` (mutually exclusive); `gap > 1`
    subsamples the saved trajectory.  Returns the result dict of
    integrated_ornstein_uhlenbeck with "X" replaced by the cGBM paths and
    "mu", "sigma", "tau", "initial_condition" recorded.
    """
    tau = float(tau)
    if tau <= 0:
        raise ValueError("tau must be > 0.")

    N = int(N)
    if N <= 0:
        raise ValueError("N must be a positive integer.")

    # tau*dZ = -Z dt + dW  <=>  dZ = -(1/tau) Z dt + (1/tau) dW,
    # whose stationary standard deviation is 1/sqrt(2*tau).
    ou_stdev = 1.0 / np.sqrt(2.0 * tau)

    result = integrated_ornstein_uhlenbeck(
        T,
        dt,
        steps=steps,
        gap=gap,
        N=N,
        samples=samples,
        stdev=ou_stdev,
        timescale=tau,
        initial_condition="stationary",
        covariance=covariance,
        mixing_matrix=mixing_matrix,
    )

    integral = result["X"]  # (samples, N, K): approx. ∫_0^t Z(s) ds
    times = result["t"]     # (K,)
    n_dim = result["N"]

    drift = _as_vector(mu, n_dim, "mu")
    noise_amp = _as_vector(sigma, n_dim, "sigma")

    if initial_condition is None:
        x0 = np.ones(n_dim, dtype=float)
    else:
        x0 = _as_vector(initial_condition, n_dim, "initial_condition")

    # X_i(t) = x0_i * exp(mu_i t + sigma_i I_i(t))
    exponent = drift[None, :, None] * times[None, None, :] + noise_amp[None, :, None] * integral
    result["X"] = x0[None, :, None] * np.exp(exponent)

    result["mu"] = drift
    result["sigma"] = noise_amp
    result["tau"] = tau
    result["initial_condition"] = x0
    return result
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from scipy.special import logsumexp
|
|
3
|
+
|
|
4
|
+
from ._utils import _as_vector, _simplex_initial_condition
|
|
5
|
+
from .integrated_ornstein_uhlenbeck import integrated_ornstein_uhlenbeck
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def colored_replicator(
    T,
    dt=None,
    *,
    steps=None,
    N=2,
    mu=1.0,
    sigma=1.0,
    tau=1.0,
    initial_condition=None,
    gap=1,
    samples=1,
    covariance=None,
    mixing_matrix=None,
):
    r"""
    Simulate the colored stochastic replicator (softmax-normalized colored
    log-process).

    Model:
        dX_i(t) = mu_i X_i(t) dt + sigma_i X_i(t) Z_i(t) dt
        tau_i dZ_i(t) = -Z_i(t) dt + dW_i(t)

    With I_i(t) = ∫_0^t Z_i(s) ds:
        X_i(t) = X_i(0) * exp(mu_i t + sigma_i I_i(t))
        Y_i(t) = X_i(t) / sum_j X_j(t)

    Notes
    -----
    The underlying OU process Z is started at zero (not stationary) so that
    comparisons with Wiener-driven models remain consistent as tau -> 0.

    Returns
    -------
    dict with keys including:
      - "X": (samples, N, K) replicator trajectory on the saved grid
      - "t": (K,) time grid (subsampled by gap)
      - plus metadata from integrated_ornstein_uhlenbeck, and
        "mu", "sigma", "tau", "initial_condition"
    """
    N = int(N)
    if N < 2:
        raise ValueError(f"colored_replicator requires N>=2, got N={N}.")

    gap = int(gap)
    if gap <= 0:
        raise ValueError("gap must be a positive integer.")

    samples = int(samples)
    if samples <= 0:
        raise ValueError("samples must be a positive integer.")

    drift = _as_vector(mu, N, "mu")
    noise_amp = _as_vector(sigma, N, "sigma")
    timescales = _as_vector(tau, N, "tau")
    if np.any(timescales <= 0):
        raise ValueError("tau must be > 0 (component-wise).")

    # The replicator state lives on the probability simplex.
    y0 = _simplex_initial_condition(initial_condition, N=N)

    # tau dZ = -Z dt + dW gives stationary stdev(Z) = 1/sqrt(2*tau); we keep
    # this tau-scaling even though Z(0) = 0 below.
    ou_stdev = 1.0 / np.sqrt(2.0 * timescales)

    # I(t) = ∫ Z ds on the fine grid internally, returned on the gap-grid.
    # OU deliberately started at zero (initial_condition=None) for comparability.
    res_ou = integrated_ornstein_uhlenbeck(
        T,
        dt,
        steps=steps,
        gap=gap,
        N=N,
        samples=samples,
        stdev=ou_stdev,
        timescale=timescales,
        initial_condition=None,
        covariance=covariance,
        mixing_matrix=mixing_matrix,
    )

    integral = res_ou["X"]  # (samples, N, K)
    times = res_ou["t"]     # (K,)

    # log X_i(t) = log y0_i + mu_i t + sigma_i I_i(t); components with
    # y0_i == 0 receive log-weight -inf and stay extinct.
    with np.errstate(divide="ignore"):
        log_y0 = np.where(y0 > 0, np.log(y0), -np.inf)  # (N,)

    log_weights = (
        log_y0[None, :, None]
        + drift[None, :, None] * times[None, None, :]
        + noise_amp[None, :, None] * integral
    )  # (samples, N, K)

    # Normalize onto the simplex via logsumexp for numerical stability.
    shares = np.exp(log_weights - logsumexp(log_weights, axis=1, keepdims=True))

    out = dict(res_ou)
    out["X"] = shares
    out["mu"] = drift
    out["sigma"] = noise_amp
    out["tau"] = timescales
    out["initial_condition"] = y0
    return out
|