derivkit-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
- derivkit/__init__.py +22 -0
- derivkit/calculus/__init__.py +17 -0
- derivkit/calculus/calculus_core.py +152 -0
- derivkit/calculus/gradient.py +97 -0
- derivkit/calculus/hessian.py +528 -0
- derivkit/calculus/hyper_hessian.py +296 -0
- derivkit/calculus/jacobian.py +156 -0
- derivkit/calculus_kit.py +128 -0
- derivkit/derivative_kit.py +315 -0
- derivkit/derivatives/__init__.py +6 -0
- derivkit/derivatives/adaptive/__init__.py +5 -0
- derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
- derivkit/derivatives/adaptive/batch_eval.py +179 -0
- derivkit/derivatives/adaptive/diagnostics.py +325 -0
- derivkit/derivatives/adaptive/grid.py +333 -0
- derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
- derivkit/derivatives/adaptive/spacing.py +66 -0
- derivkit/derivatives/adaptive/transforms.py +245 -0
- derivkit/derivatives/autodiff/__init__.py +1 -0
- derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
- derivkit/derivatives/autodiff/jax_core.py +217 -0
- derivkit/derivatives/autodiff/jax_utils.py +146 -0
- derivkit/derivatives/finite/__init__.py +5 -0
- derivkit/derivatives/finite/batch_eval.py +91 -0
- derivkit/derivatives/finite/core.py +84 -0
- derivkit/derivatives/finite/extrapolators.py +511 -0
- derivkit/derivatives/finite/finite_difference.py +247 -0
- derivkit/derivatives/finite/stencil.py +206 -0
- derivkit/derivatives/fornberg.py +245 -0
- derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
- derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
- derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
- derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
- derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
- derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
- derivkit/derivatives/tabulated_model/__init__.py +1 -0
- derivkit/derivatives/tabulated_model/one_d.py +247 -0
- derivkit/forecast_kit.py +783 -0
- derivkit/forecasting/__init__.py +1 -0
- derivkit/forecasting/dali.py +78 -0
- derivkit/forecasting/expansions.py +486 -0
- derivkit/forecasting/fisher.py +298 -0
- derivkit/forecasting/fisher_gaussian.py +171 -0
- derivkit/forecasting/fisher_xy.py +357 -0
- derivkit/forecasting/forecast_core.py +313 -0
- derivkit/forecasting/getdist_dali_samples.py +429 -0
- derivkit/forecasting/getdist_fisher_samples.py +235 -0
- derivkit/forecasting/laplace.py +259 -0
- derivkit/forecasting/priors_core.py +860 -0
- derivkit/forecasting/sampling_utils.py +388 -0
- derivkit/likelihood_kit.py +114 -0
- derivkit/likelihoods/__init__.py +1 -0
- derivkit/likelihoods/gaussian.py +136 -0
- derivkit/likelihoods/poisson.py +176 -0
- derivkit/utils/__init__.py +13 -0
- derivkit/utils/concurrency.py +213 -0
- derivkit/utils/extrapolation.py +254 -0
- derivkit/utils/linalg.py +513 -0
- derivkit/utils/logger.py +26 -0
- derivkit/utils/numerics.py +262 -0
- derivkit/utils/sandbox.py +74 -0
- derivkit/utils/types.py +15 -0
- derivkit/utils/validate.py +811 -0
- derivkit-1.0.0.dist-info/METADATA +50 -0
- derivkit-1.0.0.dist-info/RECORD +68 -0
- derivkit-1.0.0.dist-info/WHEEL +5 -0
- derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
- derivkit-1.0.0.dist-info/top_level.txt +1 -0
derivkit/derivatives/finite/batch_eval.py
@@ -0,0 +1,91 @@
+"""Batch evaluation utilities for finite-difference methods."""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Sequence
+
+import numpy as np
+
+from derivkit.utils.concurrency import (
+    parallel_execute,
+    resolve_inner_from_outer,
+)
+
+__all__ = ["eval_points"]
+
+
+def _all_scalar_like(xs: Sequence[Any]) -> bool:
+    """Returns ``True`` if all entries in ``xs`` are scalar-like.
+
+    Scalar-like means:
+    - Python scalar, or
+    - 0-dim numpy array.
+    """
+    for x in xs:
+        if np.isscalar(x):
+            continue
+        if isinstance(x, np.ndarray) and x.shape == ():
+            continue
+        return False
+    return True
+
+
+def eval_points(
+    func: Callable[[Any], Any],
+    xs: Sequence[Any],
+    n_workers: int | None = None,
+) -> np.ndarray:
+    """Evaluates ``func`` at a sequence of points.
+
+    Args:
+        func: Callable taking a single argument (scalar or array-like).
+        xs: 1D sequence of points at which to evaluate ``func``.
+            Entries may be scalars or array-like objects (e.g. vectors/tensors).
+        n_workers: Number of parallel outer workers. If ``None`` or <= 1,
+            runs serially. If greater than the number of points, capped to
+            that number.
+
+    Returns:
+        An array of function values at the specified points.
+    """
+    xs_list = list(xs)
+    if not xs_list:
+        return np.asarray([], dtype=float)
+
+    scalar_like = _all_scalar_like(xs_list)
+    args = _to_eval_args(xs_list, scalar_like)
+
+    outer_workers = _cap_outer_workers(n_workers, len(args))
+    inner_workers = resolve_inner_from_outer(outer_workers)
+
+    # parallel_execute handles both outer > 1 and outer == 1 paths.
+    arg_tuples = [(x,) for x in args]
+    vals = parallel_execute(
+        worker=func,
+        arg_tuples=arg_tuples,
+        outer_workers=outer_workers,
+        inner_workers=inner_workers,
+    )
+
+    return np.asarray(vals)
+
+
+def _to_eval_args(xs: Sequence[Any], scalar_like: bool) -> list[Any]:
+    """Prepare arguments for evaluation.
+
+    If all entries are scalar-like, cast to float (legacy behaviour).
+    Otherwise, pass through as-is to support tensor/array inputs.
+    """
+    if scalar_like:
+        return [float(x) for x in xs]
+    return list(xs)
+
+
+def _cap_outer_workers(n_workers: int | None, n_tasks: int) -> int:
+    """Cap outer workers by number of tasks; ensure at least ``1``."""
+    if n_workers is None or n_workers <= 1:
+        return 1
+    n = int(n_workers)
+    if n_tasks <= 0:
+        return 1
+    return max(1, min(n, int(n_tasks)))
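
The behaviour of `eval_points` is easiest to see on a tiny example. A minimal sketch (not from the wheel), assuming derivkit 1.0.0 is installed; the toy functions are ours:

    import numpy as np
    from derivkit.derivatives.finite.batch_eval import eval_points

    # Scalar-like inputs are cast to float, so the result is a 1-D float array.
    vals = eval_points(np.sin, [0.0, 0.5, 1.0], n_workers=1)
    print(vals.shape)  # (3,)

    # Array-valued outputs stack along the leading (per-point) axis.
    vals2 = eval_points(lambda x: np.array([x, x**2]), [1.0, 2.0, 3.0])
    print(vals2.shape)  # (3, 2)
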
derivkit/derivatives/finite/core.py
@@ -0,0 +1,84 @@
+"""Finite difference derivative estimation with a single step size."""
+
+from __future__ import annotations
+
+import numpy as np
+from numpy.typing import NDArray
+
+from .batch_eval import eval_points
+from .stencil import get_finite_difference_tables
+
+__all__ = [
+    "single_finite_step",
+]
+
+
+def single_finite_step(
+    function,
+    x0: float,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+) -> NDArray | float:
+    """Returns one central finite-difference estimate at a given step size h.
+
+    Args:
+        function:
+            The function whose derivative is to be estimated. Must accept
+            a float or NumPy array and return a float or NumPy array.
+        x0:
+            The point at which to evaluate the derivative.
+        order:
+            The order of the derivative to compute.
+        stepsize:
+            The step size (h) used to evaluate the function around x0.
+        num_points:
+            The number of points in the finite difference stencil. Must be
+            one of [3, 5, 7, 9].
+        n_workers:
+            The number of workers to use in multiprocessing.
+
+    Returns:
+        The estimated derivative. Returns a float for scalar-valued functions,
+        or a NumPy array for vector-valued functions.
+
+    Raises:
+        ValueError:
+            If the combination of ``num_points`` and ``order`` is not supported.
+    """
+    offsets, coeffs_table = get_finite_difference_tables(stepsize)
+    key = (num_points, order)
+    if key not in coeffs_table:
+        raise ValueError(
+            f"[FiniteDifference] Internal table missing coefficients for "
+            f"stencil={num_points}, order={order}."
+        )
+
+    stencil = np.array(
+        [x0 + i * stepsize for i in offsets[num_points]],
+        dtype=float,
+    )
+
+    # values shape: (n_stencil,) for scalar outputs, (n_stencil, *out_shape) otherwise
+    values = eval_points(function, stencil, n_workers=n_workers)
+    values = np.asarray(values, dtype=float)
+
+    coeff = np.asarray(coeffs_table[key], dtype=float)  # shape (n_stencil,)
+
+    if values.ndim == 1:  # this is the scalar-valued case
+        deriv = float(np.dot(coeff, values))
+        return deriv
+
+    # In the case of vector and tensor outputs, use tensordot to contract
+    # the leading stencil dimension with the coeffs.
+    # coeff shape: (n_stencil,)
+    # values shape: (n_stencil, ...)
+    # deriv shape: (...) after contraction.
+    deriv = np.tensordot(coeff, values, axes=(0, 0))
+
+    if np.ndim(deriv) == 0:
+        return float(deriv)
+
+    # Otherwise flatten trailing dims in C order to match the rest of DerivKit.
+    return np.ravel(deriv, order="C")
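
`single_finite_step` is the basic building block the extrapolators in the next file wrap. A quick self-check sketch, assuming derivkit 1.0.0 is installed (the target function and tolerance are illustrative):

    import numpy as np
    from derivkit.derivatives.finite.core import single_finite_step

    # First derivative of sin at x0 = 0.7 from a 5-point central stencil.
    d1 = single_finite_step(np.sin, x0=0.7, order=1, stepsize=1e-3,
                            num_points=5, n_workers=1)
    # A 5-point central first-derivative stencil has O(h^4) truncation error,
    # so the estimate should sit very close to cos(0.7).
    print(abs(d1 - np.cos(0.7)))  # on the order of 1e-12
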
derivkit/derivatives/finite/extrapolators.py
@@ -0,0 +1,511 @@
+"""Finite difference extrapolation utilities."""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+
+import numpy as np
+from numpy.typing import NDArray
+
+from derivkit.utils.extrapolation import (
+    gauss_richardson_extrapolate,
+    richardson_extrapolate,
+    ridders_extrapolate,
+)
+
+__all__ = [
+    "fixed_richardson_fd",
+    "fixed_ridders_fd",
+    "adaptive_richardson_fd",
+    "adaptive_ridders_fd",
+    "fixed_gre_fd",
+    "adaptive_gre_fd",
+]
+
+
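
The three extrapolation helpers imported here live in derivkit/utils/extrapolation.py, which is outside this diff. For orientation, the textbook two-level Richardson step they generalize looks like this (a sketch, not the library's code):

    # Given estimates a_h = A(h) and a_hr = A(h/r) of a quantity whose
    # leading truncation error is O(h^p), this combination cancels the
    # h^p term, leaving a higher-order error.
    def richardson_step(a_h, a_hr, p=2, r=2.0):
        return a_hr + (a_hr - a_h) / (r**p - 1.0)
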
+def fixed_richardson_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    levels: int,
+    p: int = 2,
+    r: float = 2.0,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns fixed-level Richardson extrapolation for finite differences.
+
+    Fixed level means we compute m base estimates with step sizes h, h/r, h/r^2, ..., h/r^(m-1)
+    and then apply Richardson extrapolation to get the final estimate.
+
+    Args:
+        single_finite: Function that computes a single finite difference estimate.
+        order: The order of the derivative to compute.
+        stepsize: The initial step size h.
+        num_points: Number of points in the finite difference stencil.
+        n_workers: Number of parallel workers to use.
+        levels: Number of levels (m) for Richardson extrapolation.
+        p: The order of the leading error term in the finite difference
+            approximation. Default is ``2``.
+        r: The step-size reduction factor between successive levels
+            (default is ``2.0``).
+        return_error: Whether to return an error estimate along with the
+            value (default is ``False``).
+
+    Returns:
+        The Richardson-extrapolated finite difference estimate. If ``return_error`` is True,
+        also returns an error estimate.
+    """
+    if levels < 2:
+        raise ValueError("fixed_richardson_fd requires levels >= 2 for Richardson extrapolation.")
+
+    base_values: list[NDArray | float] = []
+    h = float(stepsize)
+
+    for _ in range(levels):
+        base_values.append(single_finite(order, h, num_points, n_workers))
+        h /= r
+
+    est = richardson_extrapolate(base_values, p=p, r=r)
+
+    if not return_error:
+        return est
+
+    est_arr = np.asarray(est, dtype=float)
+
+    # With only two levels, we can’t form a previous Richardson estimate; use zeros.
+    if levels == 2:
+        err = np.zeros_like(est_arr)
+        return est, err
+
+    prev_est = richardson_extrapolate(base_values[:-1], p=p, r=r)
+    prev_arr = np.asarray(prev_est, dtype=float)
+    err = np.abs(est_arr - prev_arr)
+    return est, err
+
+
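
Note that `single_finite` has the calling convention `single_finite(order, h, num_points, n_workers)`, so `single_finite_step` from core.py must be partially applied before use. A sketch, assuming derivkit 1.0.0 is installed:

    import numpy as np
    from derivkit.derivatives.finite.core import single_finite_step
    from derivkit.derivatives.finite.extrapolators import fixed_richardson_fd

    def single_finite(order, h, num_points, n_workers):
        # Bind the target function and expansion point; exp'(0) == 1.
        return single_finite_step(np.exp, 0.0, order, h, num_points, n_workers)

    # The default p=2 matches the O(h^2) error of a 3-point central stencil.
    val, err = fixed_richardson_fd(
        single_finite, order=1, stepsize=0.1, num_points=3,
        n_workers=1, levels=4, return_error=True,
    )
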
+def fixed_ridders_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    levels: int,
+    p: int = 2,
+    r: float = 2.0,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns a fixed-level Ridders extrapolation for finite differences.
+
+    Fixed level means we compute m base estimates with step sizes h, h/r, h/r^2, ..., h/r^(m-1)
+    and then apply Ridders extrapolation to get the final estimate.
+
+    Args:
+        single_finite:
+            Function that computes a single finite difference estimate.
+        order:
+            The order of the derivative to compute.
+        stepsize:
+            The initial step size h.
+        num_points:
+            Number of points in the finite difference stencil.
+        n_workers:
+            Number of parallel workers to use.
+        levels:
+            Number of levels (m) for Ridders extrapolation.
+        p:
+            The order of the leading error term in the finite difference
+            approximation (default is ``2``).
+        r:
+            The step-size reduction factor between successive levels
+            (default is ``2.0``).
+        return_error:
+            Whether to return an error estimate along with the value
+            (default is ``False``).
+
+    Returns:
+        The Ridders-extrapolated finite difference estimate. If ``return_error`` is True,
+        also returns an error estimate.
+    """
+    base_values: list[NDArray | float] = []
+    h = float(stepsize)
+
+    for _ in range(levels):
+        base_values.append(single_finite(order, h, num_points, n_workers))
+        h /= r
+
+    value, err = ridders_extrapolate(base_values, r=r, p=p)
+    return (value, err) if return_error else value
+
+
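
Unlike plain Richardson, Ridders' scheme natively produces an error estimate, which is why `ridders_extrapolate` returns a `(value, err)` pair. A textbook Neville-style tableau conveys the idea; this is a sketch under the assumption of an error series in powers h^p, h^(2p), ... (true for central differences with p=2), not derivkit's implementation:

    import numpy as np

    def ridders_sketch(base_values, r=2.0, p=2):
        # base_values[i] is the estimate at step h / r**i (largest h first).
        row = [np.asarray(v, float) for v in base_values]
        best, err = row[-1], np.inf
        for k in range(1, len(row)):
            fac = r ** (p * k)  # column k cancels the h**(p*k) error term
            new = [(fac * row[i + 1] - row[i]) / (fac - 1.0)
                   for i in range(len(row) - 1)]
            # Track the entry with the smallest change between columns.
            change = float(np.max(np.abs(new[-1] - row[-1])))
            if change < err:
                best, err = new[-1], change
            row = new
        return best, err
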
+def adaptive_richardson_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    p: int,
+    max_levels: int = 6,
+    min_levels: int = 2,
+    r: float = 2.0,
+    rtol: float = 1e-8,
+    atol: float = 1e-12,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns an adaptive Richardson extrapolation for finite differences.
+
+    This function computes finite difference estimates at decreasing step sizes
+    and applies Richardson extrapolation iteratively until convergence is achieved
+    based on specified tolerances.
+
+    Args:
+        single_finite: Function that computes a single finite difference
+            estimate.
+        order: The order of the derivative to compute.
+        stepsize: The initial step size h.
+        num_points: Number of points in the finite difference stencil.
+        n_workers: Number of parallel workers to use.
+        p: The order of the leading error term in the finite difference
+            approximation.
+        max_levels: Maximum number of levels of extrapolation to perform
+            (default is ``6``).
+        min_levels: Minimum number of levels of extrapolation before checking
+            for convergence. Default is ``2``.
+        r: The step-size reduction factor between successive levels
+            (default is ``2.0``).
+        rtol: Relative tolerance for convergence (default is ``1e-8``).
+        atol: Absolute tolerance for convergence (default is ``1e-12``).
+        return_error: Whether to return an error estimate along with the value
+            (default is ``False``).
+
+    Returns:
+        The Richardson-extrapolated finite difference estimate. If
+        ``return_error`` is True, also returns an error estimate.
+    """
+    base_values: list[NDArray | float] = []
+    h = float(stepsize)
+
+    best = None
+    best_est = None
+    last_err = None
+
+    for level in range(max_levels):
+        val = single_finite(order, h, num_points, n_workers)
+        base_values.append(val)
+        h /= r
+
+        if level + 1 < min_levels:
+            continue
+
+        est = richardson_extrapolate(base_values, p=p, r=r)
+
+        if best_est is None:
+            best_est = est
+            best = est
+            continue
+
+        est_arr = np.asarray(est, dtype=float)
+        best_arr = np.asarray(best_est, dtype=float)
+
+        diff = np.max(np.abs(est_arr - best_arr))
+        scale = np.max([1.0, np.max(np.abs(est_arr)), np.max(np.abs(best_arr))])
+        err_arr = np.full_like(est_arr, diff)
+
+        if diff <= atol + rtol * scale:
+            if not return_error:
+                return est
+            err = np.zeros_like(est_arr) if last_err is None else last_err
+            return est, err
+
+        if diff > 10.0 * (atol + rtol * scale):
+            if not return_error:
+                return best
+            return best, err_arr
+
+        last_err = err_arr
+        best_est = est
+        best = est
+
+    if best_est is None:
+        best_est = base_values[-1]
+        last_err = np.zeros_like(np.asarray(best_est, dtype=float))
+
+    if not return_error:
+        return best_est
+    return best_est, last_err
+
+
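
The convergence test above mixes absolute and relative tolerances. In isolation it reads as follows (a sketch mirroring the logic shown, not a public API):

    import numpy as np

    def converged(est_prev, est_new, rtol=1e-8, atol=1e-12):
        # Compare successive extrapolated estimates with a mixed tolerance;
        # the scale floor of 1.0 keeps the test meaningful near zero.
        a = np.asarray(est_prev, dtype=float)
        b = np.asarray(est_new, dtype=float)
        diff = np.max(np.abs(b - a))
        scale = max(1.0, float(np.max(np.abs(a))), float(np.max(np.abs(b))))
        return diff <= atol + rtol * scale
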
+def adaptive_ridders_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    p: int,
+    max_levels: int = 6,
+    min_levels: int = 2,
+    r: float = 2.0,
+    rtol: float = 1e-8,
+    atol: float = 1e-12,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns an adaptive Ridders extrapolation for finite differences.
+
+    This function computes finite difference estimates at decreasing step sizes,
+    building up a sequence of base values and repeatedly applying Ridders
+    extrapolation until convergence is achieved based on specified tolerances.
+
+    Args:
+        single_finite: Function that computes a single finite difference
+            estimate for a given derivative order and step size
+            ``single_finite(order, h, num_points, n_workers)``.
+        order: The order of the derivative to compute.
+        stepsize: The initial step size ``h``.
+        num_points: Number of points in the finite difference stencil.
+        n_workers: Number of parallel workers to use.
+        p: The order of the leading error term in the finite difference
+            approximation.
+        max_levels: Maximum number of levels of extrapolation to perform
+            (default is ``6``).
+        min_levels: Minimum number of levels of extrapolation before checking
+            for convergence (default is ``2``).
+        r: Step-size reduction factor between successive levels
+            (default is ``2.0``).
+        rtol: Relative tolerance for convergence (default is ``1e-8``).
+        atol: Absolute tolerance for convergence (default is ``1e-12``).
+        return_error: Whether to return an error estimate along with the value
+            (default is ``False``).
+
+    Returns:
+        The Ridders-extrapolated finite difference estimate. If
+        ``return_error`` is True, also returns an error estimate.
+    """
+    base_values: list[NDArray | float] = []
+    h = float(stepsize)
+
+    best_est: NDArray | float | None = None
+    best_err: NDArray | float | None = None
+
+    for level in range(max_levels):
+        val = single_finite(order, h, num_points, n_workers)
+        base_values.append(val)
+        h /= r
+
+        if level + 1 < min_levels:
+            continue
+
+        est, err = ridders_extrapolate(base_values, p=p, r=r)
+
+        if best_est is None:
+            best_est = est
+            best_err = err
+            continue
+
+        est_arr = np.asarray(est, dtype=float)
+        best_arr = np.asarray(best_est, dtype=float)
+
+        diff = np.max(np.abs(est_arr - best_arr))
+        scale = np.max([1.0, np.max(np.abs(est_arr)), np.max(np.abs(best_arr))])
+        thresh = atol + rtol * scale
+        diff_arr = np.full_like(est_arr, diff)
+
+        # Converged: we accept the latest estimate
+        if diff <= thresh:
+            if not return_error:
+                return est
+            out_err = err if err is not None else diff_arr
+            return est, out_err
+
+        # Diverged badly: we fall back to the previous best
+        if diff > 10.0 * thresh:
+            if not return_error:
+                return best_est
+            out_err = best_err if best_err is not None else diff_arr
+            return best_est, out_err
+
+        best_est = est
+        best_err = err
+
+    if best_est is None:
+        best_est = base_values[-1]
+        best_err = np.zeros_like(np.asarray(best_est, dtype=float))
+
+    if not return_error:
+        return best_est
+    return best_est, best_err
+
+
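
A minimal end-to-end sketch of the adaptive Ridders path, wiring in `single_finite_step` (assuming derivkit 1.0.0 is installed; the target function and tolerances are ours):

    import numpy as np
    from derivkit.derivatives.finite.core import single_finite_step
    from derivkit.derivatives.finite.extrapolators import adaptive_ridders_fd

    def single_finite(order, h, num_points, n_workers):
        return single_finite_step(np.tanh, 0.3, order, h, num_points, n_workers)

    # p=2: a 3-point central first-derivative stencil has O(h^2) error.
    val, err = adaptive_ridders_fd(
        single_finite, order=1, stepsize=0.1, num_points=3,
        n_workers=1, p=2, return_error=True,
    )
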
+def fixed_gre_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    p: int,
+    levels: int,
+    r: float = 2.0,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns a fixed-level Gauss–Richardson extrapolation for finite differences.
+
+    Fixed level means we compute m base estimates with step sizes h, h/r, h/r^2, ..., h/r^(m-1)
+    and then apply Gauss–Richardson extrapolation to get the final estimate.
+
+    Args:
+        single_finite: Function that computes a single finite difference
+            estimate.
+        order: The order of the derivative to compute.
+        stepsize: The initial step size h.
+        num_points: Number of points in the finite difference stencil.
+        n_workers: Number of parallel workers to use.
+        p: The order of the leading error term in the finite difference
+            approximation.
+        levels: Number of levels (m) for Gauss–Richardson extrapolation.
+        r: The step-size reduction factor between successive levels
+            (default is ``2.0``).
+        return_error: Whether to return an error estimate along with the
+            value (default is ``False``).
+
+    Returns:
+        The Gauss–Richardson-extrapolated finite difference estimate. If
+        ``return_error`` is True, also returns an error estimate.
+
+    Raises:
+        ValueError: If the combination of ``num_points`` and ``order`` is not
+            supported or if ``levels`` < 2.
+    """
+    if levels < 2:
+        raise ValueError("fixed_gre_fd requires levels >= 2.")
+
+    base_values: list[NDArray | float] = []
+    h_values: list[float] = []
+    h = float(stepsize)
+
+    for _ in range(levels):
+        base_values.append(single_finite(order, h, num_points, n_workers))
+        h_values.append(h)
+        h /= r
+
+    value, err = gauss_richardson_extrapolate(base_values, h_values=h_values, p=p)
+    return (value, err) if return_error else value
+
+
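
`gauss_richardson_extrapolate` itself is not part of this diff, so its exact algorithm is not visible here. One reading consistent with its signature, `(values, h_values, p) -> (value, err)`, is a least-squares fit of f(h) = a0 + a1*h**p with a0 taken as the h -> 0 limit; the sketch below makes that assumption concrete and is not the library's code:

    import numpy as np

    def gre_sketch(values, h_values, p=2):
        # Fit f(h) = a0 + a1 * h**p over the sampled step sizes (scalar case);
        # a0 is the extrapolated h -> 0 value, the residual scale a crude error.
        y = np.asarray(values, dtype=float)
        h = np.asarray(h_values, dtype=float)
        design = np.stack([np.ones_like(h), h**p], axis=1)
        coef, resid, *_ = np.linalg.lstsq(design, y, rcond=None)
        err = float(np.sqrt(resid[0] / y.size)) if resid.size else 0.0
        return float(coef[0]), err
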
+def adaptive_gre_fd(
+    single_finite: Callable[[int, float, int, int], NDArray | float],
+    *,
+    order: int,
+    stepsize: float,
+    num_points: int,
+    n_workers: int,
+    p: int,
+    max_levels: int = 6,
+    min_levels: int = 2,
+    r: float = 2.0,
+    rtol: float = 1e-8,
+    atol: float = 1e-12,
+    return_error: bool = False,
+) -> NDArray | float | tuple[NDArray | float, NDArray | float]:
+    """Returns an adaptive Gauss–Richardson extrapolation for finite differences.
+
+    This function computes finite-difference estimates at decreasing step sizes,
+    builds up sequences of base values and corresponding step sizes, and applies
+    Gauss–Richardson extrapolation iteratively until convergence based on the
+    change between successive estimates.
+
+    Args:
+        single_finite: Function that computes a single finite-difference
+            estimate for a given derivative order and step size
+            ``single_finite(order, h, num_points, n_workers)``.
+        order: The order of the derivative to compute.
+        stepsize: The initial step size ``h``.
+        num_points: Number of points in the finite-difference stencil.
+        n_workers: Number of parallel workers to use.
+        p: The order of the leading error term in the finite difference
+            approximation.
+        max_levels: Maximum number of levels of extrapolation to perform
+            (default is ``6``).
+        min_levels: Minimum number of levels of extrapolation before checking
+            for convergence (default is ``2``).
+        r: Step-size reduction factor between successive levels
+            (default is ``2.0``).
+        rtol: Relative tolerance for convergence
+            (default is ``1e-8``).
+        atol: Absolute tolerance for convergence
+            (default is ``1e-12``).
+        return_error: Whether to return an error estimate along with the value
+            (default is ``False``).
+
+    Returns:
+        The Gauss–Richardson-extrapolated finite-difference estimate. If
+        ``return_error`` is True, also returns an error estimate with the
+        same shape as the estimate.
+    """
+    base_values: list[NDArray | float] = []
+    h_values: list[float] = []
+    h = float(stepsize)
+
+    best_est: NDArray | float | None = None
+    last_err: NDArray | float | None = None
+
+    result_est: NDArray | float | None = None
+    result_err: NDArray | float | None = None
+
+    for level in range(max_levels):
+        val = single_finite(order, h, num_points, n_workers)
+        base_values.append(val)
+        h_values.append(h)
+        h /= r
+
+        if level + 1 < min_levels:
+            continue
+
+        est, _ = gauss_richardson_extrapolate(
+            base_values,
+            h_values=h_values,
+            p=p,
+        )
+
+        if best_est is None:
+            best_est = est
+            continue
+
+        est_arr = np.asarray(est, dtype=float)
+        best_arr = np.asarray(best_est, dtype=float)
+
+        diff = np.max(np.abs(est_arr - best_arr))
+        scale = np.max([1.0, np.max(np.abs(est_arr)), np.max(np.abs(best_arr))])
+        thresh = atol + rtol * scale
+        err_arr = np.full_like(est_arr, diff)
+
+        # Converged: accept the latest estimate
+        if diff <= thresh:
+            result_est = est
+            # if we have a previous error, keep it; otherwise use this diff
+            result_err = last_err if last_err is not None else err_arr
+            break
+
+        # Diverged badly: fall back to the previous best
+        if diff >= 10.0 * thresh:
+            result_est = best_est
+            result_err = err_arr
+            break
+
+        # Continue refining
+        best_est = est
+        last_err = err_arr
+
+    if result_est is None:
+        if best_est is None:
+            best_est = base_values[-1]
+            last_err = np.zeros_like(np.asarray(best_est, dtype=float))
+        result_est = best_est
+        if last_err is None:
+            last_err = np.zeros_like(np.asarray(result_est, dtype=float))
+        result_err = last_err
+
+    if not return_error:
+        return result_est
+    return result_est, result_err
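
The adaptive GRE entry point follows the same wiring as the other adaptive drivers. A closing sketch, assuming derivkit 1.0.0 is installed (the target function and parameters are illustrative):

    import numpy as np
    from derivkit.derivatives.finite.core import single_finite_step
    from derivkit.derivatives.finite.extrapolators import adaptive_gre_fd

    def single_finite(order, h, num_points, n_workers):
        return single_finite_step(np.cos, 0.5, order, h, num_points, n_workers)

    # p=4: a 5-point central first-derivative stencil has O(h^4) error.
    val, err = adaptive_gre_fd(
        single_finite, order=1, stepsize=0.05, num_points=5,
        n_workers=1, p=4, return_error=True,
    )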