derivkit 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- derivkit/__init__.py +22 -0
- derivkit/calculus/__init__.py +17 -0
- derivkit/calculus/calculus_core.py +152 -0
- derivkit/calculus/gradient.py +97 -0
- derivkit/calculus/hessian.py +528 -0
- derivkit/calculus/hyper_hessian.py +296 -0
- derivkit/calculus/jacobian.py +156 -0
- derivkit/calculus_kit.py +128 -0
- derivkit/derivative_kit.py +315 -0
- derivkit/derivatives/__init__.py +6 -0
- derivkit/derivatives/adaptive/__init__.py +5 -0
- derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
- derivkit/derivatives/adaptive/batch_eval.py +179 -0
- derivkit/derivatives/adaptive/diagnostics.py +325 -0
- derivkit/derivatives/adaptive/grid.py +333 -0
- derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
- derivkit/derivatives/adaptive/spacing.py +66 -0
- derivkit/derivatives/adaptive/transforms.py +245 -0
- derivkit/derivatives/autodiff/__init__.py +1 -0
- derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
- derivkit/derivatives/autodiff/jax_core.py +217 -0
- derivkit/derivatives/autodiff/jax_utils.py +146 -0
- derivkit/derivatives/finite/__init__.py +5 -0
- derivkit/derivatives/finite/batch_eval.py +91 -0
- derivkit/derivatives/finite/core.py +84 -0
- derivkit/derivatives/finite/extrapolators.py +511 -0
- derivkit/derivatives/finite/finite_difference.py +247 -0
- derivkit/derivatives/finite/stencil.py +206 -0
- derivkit/derivatives/fornberg.py +245 -0
- derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
- derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
- derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
- derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
- derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
- derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
- derivkit/derivatives/tabulated_model/__init__.py +1 -0
- derivkit/derivatives/tabulated_model/one_d.py +247 -0
- derivkit/forecast_kit.py +783 -0
- derivkit/forecasting/__init__.py +1 -0
- derivkit/forecasting/dali.py +78 -0
- derivkit/forecasting/expansions.py +486 -0
- derivkit/forecasting/fisher.py +298 -0
- derivkit/forecasting/fisher_gaussian.py +171 -0
- derivkit/forecasting/fisher_xy.py +357 -0
- derivkit/forecasting/forecast_core.py +313 -0
- derivkit/forecasting/getdist_dali_samples.py +429 -0
- derivkit/forecasting/getdist_fisher_samples.py +235 -0
- derivkit/forecasting/laplace.py +259 -0
- derivkit/forecasting/priors_core.py +860 -0
- derivkit/forecasting/sampling_utils.py +388 -0
- derivkit/likelihood_kit.py +114 -0
- derivkit/likelihoods/__init__.py +1 -0
- derivkit/likelihoods/gaussian.py +136 -0
- derivkit/likelihoods/poisson.py +176 -0
- derivkit/utils/__init__.py +13 -0
- derivkit/utils/concurrency.py +213 -0
- derivkit/utils/extrapolation.py +254 -0
- derivkit/utils/linalg.py +513 -0
- derivkit/utils/logger.py +26 -0
- derivkit/utils/numerics.py +262 -0
- derivkit/utils/sandbox.py +74 -0
- derivkit/utils/types.py +15 -0
- derivkit/utils/validate.py +811 -0
- derivkit-1.0.0.dist-info/METADATA +50 -0
- derivkit-1.0.0.dist-info/RECORD +68 -0
- derivkit-1.0.0.dist-info/WHEEL +5 -0
- derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
- derivkit-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
"""Diagnostics for local polynomial derivative estimation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Dict
|
|
6
|
+
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
from derivkit.derivatives.local_polynomial_derivative.fit import design_matrix
|
|
10
|
+
from derivkit.derivatives.local_polynomial_derivative.local_poly_config import (
|
|
11
|
+
LocalPolyConfig,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# Public names exported by this module. (Was misspelled ``__all`` — a plain
# variable with no special meaning to Python — so the export list was
# silently never declared.)
__all__ = ["make_diagnostics"]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def make_diagnostics(
    x0: float,
    config: LocalPolyConfig,
    xs: np.ndarray,
    ys: np.ndarray,
    used: np.ndarray,
    coeffs: np.ndarray,
    degree: int,
    order: int,
    ok: bool,
) -> Dict[str, Any]:
    """Assembles the diagnostics dictionary for a local polynomial fit.

    Args:
        x0:
            The expansion point of the fit.
        config:
            LocalPolyConfig instance with fitting settings.
        xs:
            Sample points, shape ``(n_samples,)``.
        ys:
            Function evaluations, shape ``(n_samples, n_components)``.
        used:
            Boolean mask of shape ``(n_samples,)``; ``True`` marks samples
            that entered the final fit.
        coeffs:
            Fitted polynomial coefficients, shape
            ``(degree + 1, n_components)``.
        degree:
            Degree of the fitted polynomial.
        order:
            Order of the derivative being estimated.
        ok:
            Whether the fit satisfied the residual tolerances.

    Returns:
        A dictionary summarizing the fit: sizes, tolerances, the worst
        relative residual over the retained samples, and (when ``ok`` is
        false) a cautionary note.
    """
    kept_x = xs[used]
    kept_y = ys[used]

    # Worst relative residual over the retained samples; NaN when nothing
    # was retained (no fit to evaluate against).
    if kept_x.size == 0:
        worst_rel_err = float("nan")
    else:
        vander = design_matrix(x0, config, kept_x, degree)
        predicted = vander @ coeffs
        # tol_abs floors the denominator to avoid division by tiny |y|.
        scale = np.maximum(np.abs(kept_y), config.tol_abs)
        worst_rel_err = float((np.abs(predicted - kept_y) / scale).max())

    diag: Dict[str, Any] = {
        "ok": bool(ok),
        "x0": float(x0),
        "degree": int(degree),
        "order": int(order),
        "n_all": int(xs.size),
        "n_used": int(used.sum()),
        "x_used": kept_x.tolist(),
        "max_rel_err_used": worst_rel_err,
        "tol_rel": float(config.tol_rel),
        "tol_abs": float(config.tol_abs),
        "min_samples": int(config.min_samples),
        "max_trim": int(config.max_trim),
        "center": bool(config.center),
        "coeffs": coeffs.tolist(),
    }

    if not ok:
        diag["note"] = (
            "No interval fully satisfied residual tolerances; derivative is taken "
            "from the last polynomial fit and should be treated with caution."
        )

    return diag
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
"""Local polynomial fitting with outlier trimming."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
|
|
7
|
+
from derivkit.derivatives.local_polynomial_derivative.local_poly_config import (
|
|
8
|
+
LocalPolyConfig,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
# Public API of this module.
__all__ = [
    "design_matrix",
    "trimmed_polyfit",
    "centered_polyfit_least_squares",
]
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def design_matrix(
    x0: float,
    config: LocalPolyConfig,
    sample_points: np.ndarray,
    degree: int) -> np.ndarray:
    """Returns the Vandermonde design matrix for a local polynomial fit.

    When ``config.center`` is set, the matrix is built in powers of
    ``(x - x0)``; otherwise raw powers of ``x`` are used.

    Args:
        x0:
            The expansion point of the fit.
        config:
            Fit settings; only ``config.center`` is consulted here.
        sample_points:
            Sample locations, shape ``(n_samples,)``.
        degree:
            Highest polynomial power to include.

    Returns:
        Matrix of shape ``(n_samples, degree + 1)`` whose column ``k`` holds
        the ``k``-th power of the (possibly shifted) abscissas.
    """
    basis_points = sample_points - x0 if config.center else sample_points
    return np.vander(basis_points, N=degree + 1, increasing=True)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def trimmed_polyfit(
    x0: float,
    config: LocalPolyConfig,
    xs: np.ndarray,
    ys: np.ndarray,
    degree: int,
) -> tuple[np.ndarray, np.ndarray, bool]:
    """Returns a polynomial fit with trimmed outliers.

    This method fits a polynomial of the specified degree to the provided samples
    and iteratively removes sample points whose residuals exceed the configured
    tolerances. Trimming continues until either all residuals are within tolerance
    or the maximum number of trims is reached. If trimming can no longer proceed
    without violating the minimum sample requirement, the last valid fit is
    returned.

    Args:
        x0:
            The center point for polynomial fitting.
        config:
            LocalPolyConfig instance with fitting settings.
        xs:
            An array of sample points (shape ``(n_samples,)``).
        ys:
            An array of function evaluations (shape
            ``(n_samples, n_components)``).
        degree:
            The degree of the polynomial to fit.

    Returns:
        coeffs:
            The fitted polynomial coefficients. Each column corresponds to
            one output component of ``ys``, and row ``k`` contains the
            coefficient of the ``x^k`` term (or ``(x−x0)^k`` if centering is
            enabled).
        used_mask:
            A boolean array indicating which sample points were kept after
            trimming. ``True`` means the point was used in the final fit.
        ok:
            ``True`` if, after trimming, all remaining sample points satisfied
            the residual tolerances defined in ``config``. ``False`` means the
            loop stopped due to hitting trimming limits or minimum-sample
            constraints.
    """
    n_samples, n_comp = ys.shape
    keep = np.ones(n_samples, dtype=bool)  # mask of samples still in play
    n_trim = 0  # number of trimming iterations performed so far

    # Snapshot of the most recent fit; returned if the loop never succeeds.
    last_coeffs = None
    last_keep = keep.copy()
    last_ok = False

    # A degree-d polynomial needs at least d+1 points to be determined;
    # config.min_samples may demand more.
    needed = max(config.min_samples, degree + 1)

    while keep.sum() >= needed and n_trim <= config.max_trim:
        idx = np.where(keep)[0]
        x_use = xs[idx]
        y_use = ys[idx]

        mat = design_matrix(x0, config, x_use, degree)
        coeffs, *_ = np.linalg.lstsq(mat, y_use, rcond=None)

        # Relative residuals, with tol_abs flooring the denominator to
        # avoid division by tiny |y|.
        y_fit = mat @ coeffs
        denom = np.maximum(np.abs(y_use), config.tol_abs)
        err = np.abs(y_fit - y_use) / denom

        # A row is "bad" if ANY component of it violates the tolerance.
        bad_rows = (err > config.tol_rel).any(axis=1)
        if not bad_rows.any():
            # All remaining samples are consistent with the fit: success.
            last_coeffs = coeffs
            last_keep = keep.copy()
            last_ok = True
            break

        bad_idx_all = idx[bad_rows]
        leftmost, rightmost = idx[0], idx[-1]
        trimmed = False

        # shave edges only if we keep enough for this degree
        # Only edge points are ever removed; interior outliers stop the loop.
        # Note the second check re-reads keep.sum() so that trimming the left
        # edge above counts against the budget for trimming the right edge.
        if bad_idx_all[0] == leftmost and keep.sum() - 1 >= needed:
            keep[leftmost] = False
            trimmed = True
        if bad_idx_all[-1] == rightmost and keep.sum() - 1 >= needed:
            keep[rightmost] = False
            trimmed = True

        if not trimmed:
            # Bad rows exist but none are trimmable edge points: give up and
            # report the current (unsatisfactory) fit.
            last_coeffs = coeffs
            last_keep = keep.copy()
            last_ok = False
            break

        # Record this iteration's fit before retrying with the reduced mask.
        last_coeffs = coeffs
        last_keep = keep.copy()
        last_ok = False
        n_trim += 1

    if last_coeffs is None:
        # The loop body never ran (too few samples for this degree);
        # return all-zero coefficients and flag the fit as not ok.
        last_coeffs = np.zeros((degree + 1, n_comp), dtype=float)
        last_keep = keep.copy()
        last_ok = False

    return last_coeffs, last_keep, last_ok
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def centered_polyfit_least_squares(
    x0: float,
    xs: np.ndarray,
    ys: np.ndarray,
    degree: int,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Calculates a plain least-squares polynomial fit in powers of (x - x0).

    Args:
        x0: Expansion point.
        xs: Sample locations (1D array-like).
        ys: Sample values (shape ``(n_samples,)`` or ``(n_samples, n_comp)``).
        degree: Polynomial degree.

    Returns:
        A tuple containing

        - An array of shape ``(degree+1, n_comp)`` with coefficients for
          :math:`\\sum_k a_k (x - x0)^k`.
        - A boolean mask of length ``n_samples`` (all ``True`` here).
        - An array of shape ``(degree+1, n_comp)`` with the one-sigma
          standard errors of the fitted coefficients, estimated from the
          residual variance and the unscaled covariance ``(V^T V)^{-1}``.
    """
    xs = np.asarray(xs, dtype=float)
    ys = np.asarray(ys)

    # Promote 1D samples to a single-component column so scalar- and
    # vector-valued functions are handled uniformly below.
    if ys.ndim == 1:
        ys = ys[:, None]

    t = xs - x0
    vander = np.vander(t, N=degree + 1, increasing=True)  # (n_samples, degree+1)

    coeffs, *_ = np.linalg.lstsq(vander, ys, rcond=None)
    used_mask = np.ones(xs.shape[0], dtype=bool)

    # Classical OLS coefficient standard errors:
    # var(a_k) = sigma^2 * [(V^T V)^{-1}]_{kk}, per output component.
    y_fit = vander @ coeffs
    residuals = y_fit - ys  # (n_samples, n_comp)
    n_samples = vander.shape[0]
    n_params = degree + 1
    # Floor the degrees of freedom at 1 so tiny sample sets do not divide
    # by zero (or a negative number).
    dof = max(n_samples - n_params, 1)

    sigma2 = np.sum(residuals ** 2, axis=0) / dof  # shape (n_comp,)

    xtx_inv = np.linalg.inv(vander.T @ vander)  # (p, p)
    diag_xtx_inv = np.diag(xtx_inv)[:, None]  # (p, 1)

    coeff_std = np.sqrt(diag_xtx_inv * sigma2[None, :])

    return coeffs, used_mask, coeff_std
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
"""Configuration for the local polynomial regression baseline.
|
|
2
|
+
|
|
3
|
+
This config controls how
|
|
4
|
+
:class:`derivkit.local_polynomial_derivative.local_polynomial_derivative.LocalPolynomialDerivative`
|
|
5
|
+
chooses sample locations, fits the local polynomial, and decides whether the
|
|
6
|
+
fit is trustworthy enough to mark ``ok=True`` in diagnostics.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class LocalPolyConfig:
    """Configuration for the local polynomial regression baseline.

    This config controls how
    :class:`derivkit.local_polynomial_derivative.local_polynomial_derivative.LocalPolynomialDerivative`
    chooses sample locations, fits the local polynomial, and decides whether the
    fit is trustworthy enough to mark ``ok=True`` in diagnostics.
    """

    def __init__(
        self,
        rel_steps=(0.01, 0.02, 0.04, 0.08),
        tol_rel: float = 0.01,
        tol_abs: float = 1e-10,
        min_samples: int = 9,
        max_trim: int = 10,
        max_degree: int = 7,
        center: bool = True,
    ):
        """Initialize configuration.

        Args:
            rel_steps:
                Symmetric relative offsets around ``x0`` used to build the
                default sample grid.

                - For ``x0 != 0`` the grid is
                  ``x = x0 * (1 ± rel_steps[i])``.
                - For ``x0 == 0`` the grid is
                  ``x = ± rel_steps[i]``.

                Values are deduplicated and sorted; must be a non-empty
                1D sequence.

            tol_rel:
                Relative residual tolerance used when deciding whether a
                sample row is consistent with the current polynomial fit.
                A row is flagged as "bad" if any component satisfies
                ``|y_fit - y| / max(|y|, tol_abs) > tol_rel``.
                Lower values make trimming more aggressive.

            tol_abs:
                Absolute floor in the residual normalization. Prevents
                division by very small ``|y|`` when computing relative
                errors. Used as
                ``denom = max(|y|, tol_abs)``.

            min_samples:
                Minimum number of sample points that must remain after
                trimming for a fit to be considered. Also used (together
                with ``max_degree``) to ensure the system is not
                underdetermined. If trimming would reduce the usable
                samples below this threshold, trimming stops.

            max_trim:
                Maximum number of trimming iterations. Each iteration may
                remove at most one point from each edge of the grid.
                Acts as a safety bound to avoid pathological loops on
                extremely noisy or adversarial data.

            max_degree:
                Maximum polynomial degree allowed for the local fit.
                The actual degree used in :meth:`differentiate` is
                ``min(max_degree, chosen_degree)`` where
                ``chosen_degree`` is usually ``max(order + 2, 3)`` or an
                explicit ``degree=`` passed by the caller.

            center:
                If ``True``, the polynomial is expressed in powers of
                ``(x - x0)``; derivatives at ``x0`` are then read off as
                ``k! * a_k``. If ``False``, the polynomial is in powers
                of ``x`` directly. Centering generally improves numerical
                stability and is recommended.

        """
        # Normalize to a sorted, deduplicated tuple of floats, as the
        # docstring promises. (Previously only the float conversion was
        # applied, so duplicates and out-of-order values leaked through.)
        self.rel_steps = tuple(sorted({float(s) for s in rel_steps}))

        self.tol_rel = tol_rel
        self.tol_abs = tol_abs
        self.min_samples = min_samples
        self.max_trim = max_trim
        self.max_degree = max_degree
        self.center = center
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
"""Local polynomial-regression derivative estimator."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import math
|
|
6
|
+
from typing import Any, Callable
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
|
|
10
|
+
from derivkit.derivatives.local_polynomial_derivative.diagnostics import (
|
|
11
|
+
make_diagnostics,
|
|
12
|
+
)
|
|
13
|
+
from derivkit.derivatives.local_polynomial_derivative.fit import (
|
|
14
|
+
centered_polyfit_least_squares,
|
|
15
|
+
trimmed_polyfit,
|
|
16
|
+
)
|
|
17
|
+
from derivkit.derivatives.local_polynomial_derivative.local_poly_config import (
|
|
18
|
+
LocalPolyConfig,
|
|
19
|
+
)
|
|
20
|
+
from derivkit.derivatives.local_polynomial_derivative.sampling import (
|
|
21
|
+
build_samples,
|
|
22
|
+
)
|
|
23
|
+
from derivkit.utils.numerics import relative_error
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class LocalPolynomialDerivative:
    """Estimates derivatives via trimmed local polynomial regression around x0."""

    def __init__(
        self,
        func: Callable[[float], Any],
        x0: float,
        config: LocalPolyConfig | None = None,
    ):
        """Initializes the LocalPolynomialDerivative instance.

        Args:
            func:
                Function to differentiate. It should take a float and return either
                a scalar or a NumPy array (vector or tensor); derivatives are
                computed componentwise with the same output shape.
            x0:
                The point at which to estimate the derivative.
            config:
                An optional :class:`derivkit.local_polynomial_derivative.local_poly_config.LocalPolyConfig`
                instance with configuration settings.
        """
        self.func = func
        self.x0 = float(x0)
        self.config = config or LocalPolyConfig()

    def differentiate(
        self,
        order: int = 1,
        degree: int | None = None,
        n_workers: int = 1,
        return_error: bool = False,
        diagnostics: bool = False,
    ):
        """Local polynomial-regression derivative estimator.

        This class estimates derivative at ``x0`` by sampling the function in a
        small neighborhood around that point, fitting a polynomial to those samples,
        and trimming away samples whose residuals are inconsistent with the fit.
        Once a stable local polynomial is obtained, the k-th derivative is read off
        directly from the coefficient of the fitted polynomial (``k! * a_k``). The
        method works for scalar or vector/tensor-valued functions, and can optionally return
        a diagnostics dictionary showing which samples were used, how trimming
        behaved, and whether the final fit passed all internal checks.

        Args:
            order: The order of the derivative to estimate (must be >= 1).
            degree: The degree of the polynomial fit. If ``None``, it is set to
                ``max(order + 2, 3)`` but capped by ``self.config.max_degree``.
            n_workers: The number of parallel workers for function evaluation
                (must be >= 1).
            return_error: If ``True``, also returns a relative error estimate
                based on the disagreement between trimmed and least-squares fits.
            diagnostics: If ``True``, returns a diagnostics dictionary along with
                the derivative estimate.

        Returns:
            The return type depends on ``return_error`` and ``diagnostics``:

            - If ``return_error`` is False and ``diagnostics`` is False:
              the estimated derivative (float or np.ndarray).
            - If ``return_error`` is True and ``diagnostics`` is False:
              ``(derivative, error)``.
            - If ``return_error`` is False and ``diagnostics`` is True:
              ``(derivative, diagnostics_dict)``.
            - If both ``return_error`` and ``diagnostics`` are True:
              ``(derivative, error, diagnostics_dict)``.

        Raises:
            ValueError:
                If order < 1, n_workers < 1, or degree < order.
        """
        if order < 1:
            raise ValueError(f"order must be at least 1 but is {order}.")
        if n_workers < 1:
            raise ValueError(f"n_workers must be at least 1 but is {n_workers}.")

        # Choose polynomial degree with a bit of headroom.
        if degree is None:
            degree = max(order + 2, 3)
        # Cap at the configured maximum; this can push degree below order,
        # which the next check rejects.
        degree = int(min(degree, self.config.max_degree))
        if degree < order:
            raise ValueError("degree must be >= order.")

        xs, ys = build_samples(self.func, self.x0, self.config, n_workers=n_workers)

        # First, try the trimmed fit
        coeffs_trim, used_mask_trim, ok = trimmed_polyfit(
            self.x0, self.config, xs, ys, degree
        )

        # Always compute LS fit as a backup / cross-check
        coeffs_ls, used_mask_ls, coeff_std_ls = centered_polyfit_least_squares(
            self.x0, xs, ys, degree
        )

        # Decide which coefficients to trust
        if not ok:
            # Trimmed fit failed -> trust LS and estimate error from LS statistics.
            coeffs = coeffs_ls
            used_mask = used_mask_ls
            fit_type = "least_squares"

            factorial = math.factorial(order)
            a_k_ls = coeffs_ls[order]  # shape (n_comp,)
            sigma_ak = coeff_std_ls[order]  # shape (n_comp,)

            deriv_ls = factorial * a_k_ls
            sigma_deriv = factorial * sigma_ak

            tiny = np.finfo(float).tiny  # this avoids division by zero
            # Relative error of the LS derivative: sigma / |derivative|.
            err = np.abs(sigma_deriv) / np.maximum(np.abs(deriv_ls), tiny)
        else:
            # Both fits available -> compare their implied derivatives
            coeffs_trim = np.asarray(coeffs_trim)
            coeffs_ls = np.asarray(coeffs_ls)
            if coeffs_trim.ndim == 1:
                coeffs_trim = coeffs_trim[:, None]
            if coeffs_ls.ndim == 1:
                coeffs_ls = coeffs_ls[:, None]

            # Derivative from trimmed fit
            deriv_trim = math.factorial(order) * coeffs_trim[order]
            # Derivative from LS fit
            deriv_ls = math.factorial(order) * coeffs_ls[order]

            err = relative_error(deriv_trim, deriv_ls)
            # Tolerance can be tuned; keep it modest so polynomials/sin pass.
            rel_err_tol = 1e-3

            # NOTE(review): if ``relative_error`` returns a per-component
            # array for multi-component outputs, this comparison would raise
            # "truth value of an array is ambiguous" — confirm that
            # relative_error reduces to a scalar.
            if err <= rel_err_tol:
                coeffs = coeffs_trim
                used_mask = used_mask_trim
                fit_type = "trimmed"
            else:
                # Fits disagree beyond tolerance: fall back to the LS fit.
                coeffs = coeffs_ls
                used_mask = used_mask_ls
                fit_type = "least_squares"

        # Normalize to 2D (degree+1, n_comp) regardless of which branch ran.
        coeffs = np.asarray(coeffs)
        if coeffs.ndim == 1:
            coeffs = coeffs[:, None]

        # k-th derivative at x0 is k! times the k-th centered coefficient.
        n_comp = coeffs.shape[1]
        factorial = math.factorial(order)
        a_k = coeffs[order]
        deriv = factorial * a_k
        # Scalar functions get a plain float back; vector functions an array.
        deriv_out = float(deriv[0]) if n_comp == 1 else deriv

        # Make error output shape match the derivative shape
        err_arr = np.asarray(err)
        if n_comp == 1 and err_arr.ndim > 0:
            err_out = float(err_arr[0])
        else:
            err_out = err_arr

        if diagnostics:
            diag = make_diagnostics(
                self.x0,
                self.config,
                xs,
                ys,
                used_mask,
                coeffs,
                degree,
                order,
                ok,
            )
            diag["n_workers"] = int(n_workers)
            diag["fit_type"] = fit_type

            if return_error:
                return deriv_out, err_out, diag
            return deriv_out, diag

        # diagnostics is False
        if return_error:
            return deriv_out, err_out

        return deriv_out
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Sampling utilities for local polynomial derivative estimation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
6
|
+
from typing import Any, Callable
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
|
|
10
|
+
from derivkit.derivatives.local_polynomial_derivative.local_poly_config import (
|
|
11
|
+
LocalPolyConfig,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
__all__ = ["build_samples"]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def build_samples(
    func: Callable[[float], Any],
    x0: float,
    config: LocalPolyConfig,
    n_workers: int = 1,
):
    """Builds sample points for a function around a central value.

    Args:
        func:
            Function to evaluate. Takes a float and returns a scalar or
            np.ndarray.
        x0:
            Point around which to sample.
        config:
            LocalPolyConfig instance with sampling settings.
        n_workers:
            Number of parallel workers for function evaluation.

    Returns:
        xs: A sorted array of unique sample points (shape ``(n_samples,)``).
        ys: An array of function evaluations (shape ``(n_samples, n_components)``).

    Raises:
        ValueError:
            if rel_steps in config is not a 1D non-empty sequence of floats.
    """
    rel_steps = np.asarray(config.rel_steps, float)
    # np.asarray always yields an ndarray, and a scalar input becomes a 0-d
    # array that this check rejects — so no separate scalar handling is
    # needed (the old np.isscalar branch was unreachable).
    if rel_steps.ndim != 1 or rel_steps.size == 0:
        raise ValueError("rel_steps must be a 1D non-empty sequence of floats.")

    # Symmetric offsets on both sides of x0: relative when x0 != 0,
    # absolute when x0 == 0 (a relative step would collapse to zero there).
    offsets = np.concatenate([-rel_steps, rel_steps])
    if x0 == 0.0:
        xs = offsets
    else:
        xs = x0 * (1.0 + offsets)

    # np.unique both deduplicates and returns the values sorted, so no
    # additional sort is needed.
    xs = np.unique(xs)

    # atleast_1d gives scalar and vector outputs a uniform row shape.
    def _eval_one(x):
        return np.atleast_1d(func(float(x)))

    if n_workers == 1:
        ys_list = [_eval_one(x) for x in xs]
    else:
        # Threads (not processes) so arbitrary closures can be evaluated.
        with ThreadPoolExecutor(max_workers=n_workers) as ex:
            ys_list = list(ex.map(_eval_one, xs))

    ys = np.stack(ys_list, axis=0)
    # Flatten higher-rank (tensor) outputs so each row is one flat
    # component vector.
    if ys.ndim != 2:
        ys = ys.reshape(ys.shape[0], -1)

    return xs, ys
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Subpackage for derivative estimation from tabulated (precomputed) models."""
|