tsdynamics 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. tsdynamics/__init__.py +3 -0
  2. tsdynamics/_version.py +24 -0
  3. tsdynamics/base/__init__.py +6 -0
  4. tsdynamics/base/base.py +87 -0
  5. tsdynamics/base/dde_base.py +322 -0
  6. tsdynamics/base/map_base.py +139 -0
  7. tsdynamics/base/ode_base.py +261 -0
  8. tsdynamics/systems/__init__.py +3 -0
  9. tsdynamics/systems/continuous/__init__.py +313 -0
  10. tsdynamics/systems/continuous/chaotic_attractors.py +1081 -0
  11. tsdynamics/systems/continuous/chem_bio_systems.py +416 -0
  12. tsdynamics/systems/continuous/climate_geophysics.py +209 -0
  13. tsdynamics/systems/continuous/coupled_systems.py +306 -0
  14. tsdynamics/systems/continuous/delayed_systems.py +63 -0
  15. tsdynamics/systems/continuous/exotic_systems.py +205 -0
  16. tsdynamics/systems/continuous/neural_cognitive.py +83 -0
  17. tsdynamics/systems/continuous/oscillatory_systems.py +202 -0
  18. tsdynamics/systems/continuous/physical_systems.py +225 -0
  19. tsdynamics/systems/continuous/population_dynamics.py +81 -0
  20. tsdynamics/systems/discrete/__init__.py +89 -0
  21. tsdynamics/systems/discrete/chaotic_maps.py +210 -0
  22. tsdynamics/systems/discrete/exotic_maps.py +154 -0
  23. tsdynamics/systems/discrete/geometric_maps.py +87 -0
  24. tsdynamics/systems/discrete/polynomial_maps.py +57 -0
  25. tsdynamics/systems/discrete/population_maps.py +53 -0
  26. tsdynamics/utils/__init__.py +11 -0
  27. tsdynamics/utils/curvature_dt.py +147 -0
  28. tsdynamics/utils/frequency_dt.py +236 -0
  29. tsdynamics/utils/general.py +15 -0
  30. tsdynamics/utils/sagitta_dt.py +493 -0
  31. tsdynamics/viz/__init__.py +14 -0
  32. tsdynamics/viz/animators.py +137 -0
  33. tsdynamics/viz/base.py +48 -0
  34. tsdynamics/viz/plotters.py +372 -0
  35. tsdynamics/viz/transforms.py +287 -0
  36. tsdynamics-0.1.0.dist-info/METADATA +281 -0
  37. tsdynamics-0.1.0.dist-info/RECORD +39 -0
  38. tsdynamics-0.1.0.dist-info/WHEEL +4 -0
  39. tsdynamics-0.1.0.dist-info/licenses/LICENSE +201 -0
tsdynamics/__init__.py ADDED
@@ -0,0 +1,3 @@
1
# Public package namespace: re-export the core subpackages.
from . import base, systems, utils, viz

__all__ = ["base", "utils", "systems", "viz"]
tsdynamics/_version.py ADDED
@@ -0,0 +1,24 @@
1
# file generated by vcs-versioning
# don't change, don't track in version control
from __future__ import annotations

__all__ = [
    "__version__",
    "__version_tuple__",
    "version",
    "version_tuple",
    "__commit_id__",
    "commit_id",
]

# Type declarations first so static checkers see the public API types;
# the concrete values are assigned below.
version: str
__version__: str
__version_tuple__: tuple[int | str, ...]
version_tuple: tuple[int | str, ...]
commit_id: str | None
__commit_id__: str | None

__version__ = version = '0.1.0'
__version_tuple__ = version_tuple = (0, 1, 0)

# No VCS commit id was recorded for this build.
__commit_id__ = commit_id = None
@@ -0,0 +1,6 @@
1
# Re-export the dynamical-system base classes at subpackage level.
from .base import BaseDyn
from .dde_base import DynSysDelay
from .map_base import DynMap
from .ode_base import DynSys

__all__ = ["BaseDyn", "DynSys", "DynMap", "DynSysDelay"]
@@ -0,0 +1,87 @@
1
+ from typing import Any, Optional
2
+
3
+ import numpy as np
4
+
5
+
6
class BaseDyn:
    """Abstract base class for all dynamical systems.

    Holds the state-space dimension ``n_dim``, a parameter dictionary
    ``params`` (each key mirrored as an instance attribute), and the
    initial conditions shared by the concrete system types.
    """

    def __init__(self, n_dim=None, params=None, initial_conds=None) -> None:
        """
        Parameters
        ----------
        n_dim : int, optional
            Dimension of the state space. Falls back to a class-level
            ``n_dim`` attribute when a subclass defines one.
        params : dict, optional
            System parameters. Falls back to a class-level ``params``
            attribute; always copied so instances never share the dict.
        initial_conds : array-like, optional
            Initial state. Falls back to a class-level attribute, else None.
        """
        self.n_dim = n_dim if n_dim is not None else getattr(self, "n_dim", None)
        self.params = (
            dict(params) if params is not None else dict(getattr(self, "params", {}) or {})
        )
        if initial_conds is not None:
            self.initial_conds = initial_conds
        else:
            self.initial_conds = getattr(self, "initial_conds", None)

        # Make the parameters available as attributes
        if self.params:
            for key, value in self.params.items():
                setattr(self, key, value)

    def __setattr__(self, name: str, value: Any) -> None:
        """Set an attribute, keeping ``self.params`` in sync.

        If ``name`` is an *existing* key of ``self.params``, the dict entry
        is updated as well; new keys are never added to the dict.
        """
        params = self.__dict__.get("params")
        if isinstance(params, dict) and name in params:
            params[name] = value
        object.__setattr__(self, name, value)

    def generate_timesteps(
        self,
        dt: float = 0.02,
        steps: Optional[int] = None,
        final_time: Optional[float] = 1.0,
    ) -> np.ndarray:
        """
        Generate a sequence of time steps for a given simulation or time series.

        Parameters
        ----------
        dt : float, optional
            The time step size between consecutive time points. Must be
            positive. Default is 0.02.
        steps : int, optional
            The number of steps (i.e., points) to generate. If provided, this
            takes precedence over ``final_time``.
        final_time : float, optional
            The final simulation time. Used only if ``steps`` is not provided.
            Default is 1.0.

        Returns
        -------
        timesteps : ndarray of float64
            A 1D NumPy array containing the generated time steps.

        Raises
        ------
        ValueError
            If ``dt`` is not positive, or if neither ``steps`` nor
            ``final_time`` is provided.

        Notes
        -----
        ``steps`` takes precedence over ``final_time``. If both are given, the
        ``steps`` value is used, and a warning message is printed. The returned
        array always includes the final point (either ``steps * dt`` or
        ``final_time``).

        Examples
        --------
        >>> obj.generate_timesteps(dt=0.1, steps=5)
        array([0. , 0.1, 0.2, 0.3, 0.4, 0.5])

        >>> obj.generate_timesteps(dt=0.1, final_time=0.4)
        array([0. , 0.1, 0.2, 0.3, 0.4])
        """
        if dt <= 0:
            raise ValueError("'dt' must be positive.")
        if steps is None:
            if final_time is None:
                raise ValueError("Either 'steps' or 'final_time' must be provided.")
            ts = np.arange(0.0, final_time, dt)
            # Guarantee the final point despite float rounding in arange.
            if ts.size == 0 or ts[-1] < final_time - 1e-12:
                ts = np.append(ts, final_time)
        else:
            if final_time is not None:
                print("Both 'steps' and 'final_time' are given. Using 'steps' instead.")
            # linspace is float-exact here: exactly steps + 1 points ending at
            # steps * dt. The previous arange(0, steps*dt, dt) + append could
            # gain or lose a point to floating-point rounding of the endpoint.
            ts = np.linspace(0.0, steps * dt, steps + 1)
        return ts
@@ -0,0 +1,322 @@
1
+ import warnings
2
+ from abc import ABC, abstractmethod
3
+ from typing import Callable, Optional, Sequence, Tuple
4
+
5
+ import numpy as np
6
+ from jitcdde import jitcdde, jitcdde_lyap, t, y
7
+
8
+ from .base import BaseDyn
9
+
10
# Silence the UserWarning whose message matches the pattern below
# (presumably emitted by jitcdde when a requested output time does not lie
# ahead of the integrator's current internal time — TODO confirm against
# the jitcdde version in use).
warnings.filterwarnings(
    "ignore",
    message=".*target time is smaller than the current time.*",
    category=UserWarning,
)
15
+
16
+
17
class DynSysDelay(BaseDyn, ABC):
    """
    Base class for delay differential systems (DDEs) using jitcdde.

    Subclasses implement `_rhs(y, t, **params)` and return a list/tuple
    of length `n_dim` with expressions built from jitcdde's `y(i, time)` and `t`.
    """

    # --------- Interface similar to DynSys ---------
    def rhs(self, y_sym, t_sym):
        """
        Wrapper to pass parameters into subclass rhs.
        Returns a tuple/list of expressions of length n_dim.
        """
        return self._rhs(y_sym, t_sym, **self.params)

    @abstractmethod
    def _rhs(self, y_sym, t_sym, **params):
        """
        Provide the DDE right-hand side as jitcdde expressions.

        Parameters
        ----------
        y_sym : callable
            jitcdde symbol: y(index, time)
        t_sym : symbol
            jitcdde symbol for time t
        **params : dict
            Parameters made available as attributes and kwargs.

        Returns
        -------
        Sequence of length n_dim with expressions referencing y_sym(., t_sym) and delays.
        """
        raise NotImplementedError

    # --------- Shared helpers ---------
    def _resolve_initial_conds(self, initial_conds):
        """
        Resolve the t=0 state with precedence: explicit argument, then the
        stored ``self.initial_conds``, then random ``U[0, 1)``.

        The result is stored back into ``self.initial_conds`` (as a copy) and
        returned as a float array of shape ``(n_dim,)``.

        Note: this fixes two former bugs — ``integrate`` used to run
        ``np.asarray(None)`` (NaN state) when an instance already had stored
        initial conditions, and ``lyapunov_spectrum`` discarded an explicitly
        passed ``initial_conds`` in favor of the stored one.
        """
        if initial_conds is None:
            if self.initial_conds is not None:
                initial_conds = self.initial_conds
            else:
                initial_conds = np.random.rand(self.n_dim)
        initial_conds = np.asarray(initial_conds, float).reshape(self.n_dim)
        self.initial_conds = np.array(initial_conds, copy=True)
        return self.initial_conds

    def _build_symbolic_rhs(self):
        """Build the jitcdde expression tuple and validate its length."""
        f = tuple(self.rhs(y, t))
        if len(f) != self.n_dim:
            raise ValueError(f"_rhs must return length {self.n_dim}, got {len(f)}")
        return f

    # --------- DDE integration (jitcdde) ---------
    def integrate(
        self,
        dt: float = 0.02,
        steps: Optional[int] = None,
        final_time: float = 100.0,
        initial_conds: Optional[Sequence[float]] = None,
        rtol: float = 1e-3,
        atol: float = 1e-3,
        history: Optional[Callable[[float], Sequence[float]]] = None,
        **kwargs,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Integrate the DDE system with adaptive control via jitcdde.

        Parameters
        ----------
        dt : float
            Output sampling step for returned trajectory (not the internal stepper step).
        steps : int, optional
            Number of output points; if given, overrides final_time.
        final_time : float
            Final simulation time if `steps` is not provided.
        initial_conds : sequence of float, optional
            Used to set a constant past if `history` is None. Takes precedence
            over ``self.initial_conds``; random if neither is available.
        rtol, atol : float
            Relative and absolute tolerances passed to jitcdde.
        history : callable, optional
            Function h(s) -> sequence at time s (s <= 0) to define the past.
            If None, a constant past is used from `initial_conds`.
        **kwargs :
            Passed to `set_integration_parameters` (e.g., max_step, first_step, min_step).

        Returns
        -------
        t_eval : ndarray, shape (m,)
            Output times.
        y_eval : ndarray, shape (m, n_dim)
            Solution values at `t_eval`.

        Raises
        ------
        ValueError
            If shapes/params are inconsistent or output times are negative.
        """
        # Determine initial conditions / past (argument > stored > random).
        self._resolve_initial_conds(initial_conds)

        # Output grid
        t_eval = self.generate_timesteps(dt=dt, steps=steps, final_time=final_time)
        if t_eval[0] < 0.0:
            raise ValueError("DDE integration requires nonnegative output times (t >= 0).")

        # Build jitcdde system (length-checked against n_dim)
        dde = jitcdde(self._build_symbolic_rhs())

        # Past / history
        if history is None:
            dde.constant_past(self.initial_conds)
            hist0 = self.initial_conds
        else:
            # history(s) must return a sequence of length n_dim for any s <= 0
            def _hist(s: float) -> np.ndarray:
                return np.asarray(history(s), dtype=float).reshape(self.n_dim)

            dde.past_from_function(_hist)
            hist0 = _hist(0.0)

        # Tolerances and optional stepper settings
        dde.set_integration_parameters(rtol=rtol, atol=atol, **kwargs)

        # If you are confident your history is compatible and want silence:
        dde.initial_discontinuities_handled = True

        # Integrate to requested times
        y_out = np.empty((t_eval.size, self.n_dim), dtype=float)

        # t=0 value: use the history at 0
        y_out[0] = hist0

        # march forward
        for k in range(1, t_eval.size):
            y_out[k] = dde.integrate(float(t_eval[k]))

        return t_eval, y_out

    def lyapunov_spectrum(
        self,
        dt: float = 0.1,
        final_time: float = 200.0,
        initial_conds: Optional[Sequence[float]] = None,
        n_lyap: int = 1,  # user chooses how many (DDEs have infinitely many)
        history: Optional[Callable[[float], Sequence[float]]] = None,
        burn_in: float = 50.0,
        rtol: float = 1e-6,
        atol: float = 1e-9,
        **integration_kwargs,
    ) -> np.ndarray:
        """
        Estimate the first ``n_lyap`` Lyapunov exponents of a DDE using
        :func:`jitcdde.jitcdde_lyap`.

        This integrates the delay system together with ``n_lyap`` separation
        functions. At each sampling time, JiTCDDE returns *local* exponents and
        a **weight** (the effective integration time they represent). The
        reported spectrum is the weight-averaged mean of those local values
        after an optional burn-in.

        Parameters
        ----------
        dt : float, optional
            Sampling interval at which local exponents are requested. Does
            **not** constrain the adaptive internal stepper. Default ``0.1``.
        final_time : float, optional
            Length of the averaging window *after* burn-in. Default ``200.0``.
        initial_conds : sequence of float, optional
            State at ``t=0`` used for a constant past when ``history`` is not
            given. Takes precedence over ``self.initial_conds``; random
            ``U[0,1)`` if neither is available.
        n_lyap : int, optional
            Number of leading Lyapunov exponents to estimate. Default ``1``.
        history : callable, optional
            Function ``h(s) -> sequence`` defining the past for ``s <= 0``.
        burn_in : float, optional
            Time to discard before averaging (aligns separation functions).
            Default ``50.0``.
        rtol, atol : float, optional
            JiTCDDE tolerances. Defaults ``1e-6`` / ``1e-9``.
        **integration_kwargs
            Forwarded to ``set_integration_parameters`` (e.g. ``max_step``).

        Returns
        -------
        exponents : (n_lyap,) ndarray of float
            Weight-averaged Lyapunov exponents as produced by JiTCDDE.

        Raises
        ------
        ValueError
            If ``n_dim`` is not set, or the subclass ``_rhs`` returns the
            wrong length.

        Notes
        -----
        - Each ``jitcdde_lyap.integrate(T)`` call returns
          ``(state, local_lyaps, weight)``; the returned ``weight`` must be
          used when averaging (it may be zero if no real integration occurred
          between two sampling times).
        - Avoid histories that place the system exactly at an equilibrium
          (they yield trivial, near-zero exponents).

        Examples
        --------
        >>> mg = MackeyGlass()
        >>> hist = lambda s: [1.0 + 0.1*np.sin(0.2*s)]
        >>> exps = mg.lyapunov_spectrum(n_lyap=2, dt=0.2, burn_in=100.0,
        ...                             final_time=300.0, history=hist,
        ...                             rtol=1e-8, atol=1e-10)  # doctest: +SKIP
        """
        if self.n_dim is None:
            raise ValueError("n_dim must be set.")

        # Past / ICs. Fixed: an explicitly passed `initial_conds` now wins
        # (the original if/else was inverted and silently discarded it).
        initial_conds = self._resolve_initial_conds(initial_conds)

        # Build symbolic field (length-checked against n_dim)
        f = self._build_symbolic_rhs()

        dde = jitcdde_lyap(f, n_lyap=n_lyap)
        dde.set_integration_parameters(rtol=rtol, atol=atol, **integration_kwargs)

        if history is None:
            dde.constant_past(initial_conds)
        else:
            # past_from_function is broken with jitcdde_lyap: chspy.from_function sets
            # .happy attributes on Anchor objects, but jitcdde.Past.prepare_anchor
            # recreates every Anchor (to expand tangent-vector dimensions) and the
            # new object loses .happy, causing AttributeError inside from_function.
            # Fix: patch prepare_anchor to forward .happy through the reconstruction.
            from jitcdde.past import Past as _Past

            _orig_prepare = _Past.prepare_anchor

            def _prepare_preserving_happy(self_past, x):
                result = _orig_prepare(self_past, x)
                if hasattr(x, "happy"):
                    result.happy = x.happy
                return result

            _Past.prepare_anchor = _prepare_preserving_happy
            try:
                dde.past_from_function(lambda s: np.asarray(history(s), float).reshape(self.n_dim))
            finally:
                _Past.prepare_anchor = _orig_prepare

        # Handle initial discontinuities; start sampling from dde.t afterwards
        dde.step_on_discontinuities()

        # Burn-in: align separation functions (discard output)
        T_end_burn = float(dde.t) + max(0.0, burn_in)
        while dde.t < T_end_burn:
            Tn = min(T_end_burn, dde.t + dt)
            _ = dde.integrate(Tn)  # returns (state, local_lyaps, weight)

        # Production: weight-average the local LEs using the returned weights
        T_end = float(dde.t) + final_time
        weights = []
        ly_steps = []
        prev_time = float(dde.t)

        while dde.t < T_end:
            Tn = min(T_end, dde.t + dt)
            ret = dde.integrate(Tn)  # expected: (state, local_lyaps, weight)
            if not isinstance(ret, tuple):
                raise RuntimeError("jitcdde_lyap.integrate did not return a tuple")
            if len(ret) >= 3:
                _, local_lyaps, w = ret
                weight = float(w)
            else:
                # Fallback (shouldn't happen): use elapsed time as weight
                _, local_lyaps = ret
                weight = Tn - prev_time

            v = np.asarray(local_lyaps, float).reshape(-1)
            if v.size != n_lyap:
                raise ValueError(f"Expected {n_lyap} local LEs, got {v.shape}")

            ly_steps.append(v)
            weights.append(weight)
            prev_time = float(dde.t)

        W = np.asarray(weights, float)
        mask = W > 0.0
        if not np.any(mask):
            return np.zeros(n_lyap)
        L = np.vstack([ly_steps[i] for i, m in enumerate(mask) if m])
        exponents = (W[mask, None] * L).sum(axis=0) / W[mask].sum()
        return exponents
@@ -0,0 +1,139 @@
1
+ from abc import abstractmethod
2
+
3
+ import numpy as np
4
+
5
+ from .base import BaseDyn
6
+
7
+
8
class DynMap(BaseDyn):
    """Class for discrete maps."""

    def rhs(self, X):
        """Apply the map once to state ``X`` (params passed positionally)."""
        X = np.asarray(X, dtype=np.float64)
        params = tuple(float(v) for v in self.params.values())
        # Call with positional args only
        out = self._rhs(X, *params)
        return np.asarray(out, dtype=np.float64)

    def jac(self, X):
        """Evaluate the Jacobian of the map at state ``X``."""
        X = np.asarray(X, dtype=np.float64)
        params = tuple(float(v) for v in self.params.values())
        out = self._jac(X, *params)
        return np.asarray(out, dtype=np.float64)

    @abstractmethod
    def _rhs(self, X, **params):
        """Right-hand side function to be implemented by subclasses."""
        raise NotImplementedError

    @abstractmethod
    def _jac(self, X, **params):
        """Jacobian function to be implemented by subclasses."""
        raise NotImplementedError

    def iterate(self, initial_conds=None, steps=1000, max_retries=10):
        """Iterate the map for `steps` steps starting from `initial_conds`.

        If the trajectory diverges (NaN/Inf), a new random initial condition
        is drawn and the iteration restarts, up to `max_retries` times.
        The initial condition actually used is stored in ``self.initial_conds``.

        Returns
        -------
        time : ndarray of int, shape (steps,)
            Step indices 0..steps-1.
        trajectory : ndarray, shape (steps, n_dim)
            States x_1..x_steps (the initial condition itself is not included).

        Raises
        ------
        ValueError
            If every retry diverged.
        """
        retries = 0

        while retries < max_retries:
            if initial_conds is None:
                if self.initial_conds is None:
                    initial_conds = np.random.rand(self.n_dim)
                initial_conds = np.asarray(initial_conds, float).reshape(self.n_dim)
                self.initial_conds = np.array(initial_conds, copy=True)
            else:
                self.initial_conds = np.array(initial_conds, copy=True)

            y = np.atleast_1d(self.initial_conds)

            trajectory = np.empty((steps, y.size))

            try:
                for i in range(steps):
                    y = self.rhs(y)
                    if np.any(np.isnan(y)) or np.any(np.isinf(y)):
                        raise ValueError(f"The trajectory diverged at step {i}: y = {y}")
                    trajectory[i] = np.atleast_1d(y)
                time = np.arange(steps)
                return time, trajectory

            except ValueError as e:
                print(f"Warning: {e}. Retrying with a new random initial condition.")
                initial_conds = None
                self.initial_conds = None
                retries += 1

        raise ValueError(f"Failed to iterate the map after {max_retries} retries")

    def lyapunov_spectrum(
        self,
        y0=None,
        steps=1000,
        num_exponents=None,
        perturbation_scale=1e-8,
        reorthonormalize_interval=1,
    ):
        """
        Compute the leading Lyapunov exponents of the map via QR
        reorthonormalization of tangent-space vectors.

        Args:
            y0 (array): Initial condition. If None, drawn uniformly from [0, 1).
            steps (int): Number of iterations; must be at least
                `reorthonormalize_interval` so one QR interval completes.
            num_exponents (int): Number of exponents to compute
                (default: n_dim; must not exceed n_dim).
            perturbation_scale (float): Retained for backward compatibility.
                The tangent-space update is linear, hence scale-invariant, so
                this value is ignored. (The old implementation seeded the
                vectors with it, which biased every exponent by
                log(perturbation_scale) / steps.)
            reorthonormalize_interval (int): Steps between reorthonormalizations.

        Returns:
            exponents (array): Array of Lyapunov exponents.

        Raises:
            ValueError: If neither `y0` nor `n_dim` is available, if
                `num_exponents` exceeds `n_dim`, or if
                `steps < reorthonormalize_interval`.
        """
        if y0 is None:
            if self.n_dim is None:
                raise ValueError("Initial conditions must be provided, else n_dim must be set")
            y0 = np.random.rand(self.n_dim)
        else:
            y0 = np.asarray(y0)

        if num_exponents is None:
            num_exponents = self.n_dim

        n_dim = self.n_dim
        if num_exponents > n_dim:
            raise ValueError(f"num_exponents ({num_exponents}) cannot exceed n_dim ({n_dim})")
        if steps < reorthonormalize_interval:
            # Previously this fell through to a division by zero below.
            raise ValueError("steps must be at least reorthonormalize_interval")

        # Start from *orthonormal* perturbations. The linearized update is
        # scale-invariant; the old eye * perturbation_scale seed put log(scale)
        # into the first R diagonal, biasing each exponent by log(scale)/steps.
        perturbations = np.eye(n_dim)[:num_exponents]

        # Accumulate logarithms of stretching factors
        lyapunov_sums = np.zeros(num_exponents)
        total_intervals = 0

        # Trajectory via iterate() (which retries on NaN/Inf divergence);
        # self.initial_conds then holds the IC actually used.
        _, states = self.iterate(y0, steps)

        # The tangent product along the orbit is J(x_{steps-1})...J(x_1)J(x_0):
        # the Jacobian is evaluated at the state *before* each application.
        # (The original skipped x_0 and used x_1..x_steps instead.)
        x0 = np.atleast_1d(np.asarray(self.initial_conds, float))
        orbit = np.vstack([x0, states[:-1]])

        for step in range(steps):
            # Jacobian at the pre-image of this step's application
            J = np.array(self.jac(orbit[step]))

            # Update perturbations
            perturbations = np.dot(J, perturbations.T).T

            # Reorthonormalization
            if (step + 1) % reorthonormalize_interval == 0:
                Q, R = np.linalg.qr(perturbations.T)
                perturbations = Q.T

                # Accumulate the logarithms of the absolute values of the diagonal elements of R
                # Guard against exact zeros on the diagonal (e.g. stable fixed-point trajectories)
                diag_abs = np.abs(np.diag(R))
                diag_abs = np.where(diag_abs == 0.0, np.finfo(float).tiny, diag_abs)
                lyapunov_sums += np.log(diag_abs)
                total_intervals += 1

        # Average log-stretching per map iteration
        exponents = lyapunov_sums / (total_intervals * reorthonormalize_interval)

        return exponents