derivkit-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- derivkit/__init__.py +22 -0
- derivkit/calculus/__init__.py +17 -0
- derivkit/calculus/calculus_core.py +152 -0
- derivkit/calculus/gradient.py +97 -0
- derivkit/calculus/hessian.py +528 -0
- derivkit/calculus/hyper_hessian.py +296 -0
- derivkit/calculus/jacobian.py +156 -0
- derivkit/calculus_kit.py +128 -0
- derivkit/derivative_kit.py +315 -0
- derivkit/derivatives/__init__.py +6 -0
- derivkit/derivatives/adaptive/__init__.py +5 -0
- derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
- derivkit/derivatives/adaptive/batch_eval.py +179 -0
- derivkit/derivatives/adaptive/diagnostics.py +325 -0
- derivkit/derivatives/adaptive/grid.py +333 -0
- derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
- derivkit/derivatives/adaptive/spacing.py +66 -0
- derivkit/derivatives/adaptive/transforms.py +245 -0
- derivkit/derivatives/autodiff/__init__.py +1 -0
- derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
- derivkit/derivatives/autodiff/jax_core.py +217 -0
- derivkit/derivatives/autodiff/jax_utils.py +146 -0
- derivkit/derivatives/finite/__init__.py +5 -0
- derivkit/derivatives/finite/batch_eval.py +91 -0
- derivkit/derivatives/finite/core.py +84 -0
- derivkit/derivatives/finite/extrapolators.py +511 -0
- derivkit/derivatives/finite/finite_difference.py +247 -0
- derivkit/derivatives/finite/stencil.py +206 -0
- derivkit/derivatives/fornberg.py +245 -0
- derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
- derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
- derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
- derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
- derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
- derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
- derivkit/derivatives/tabulated_model/__init__.py +1 -0
- derivkit/derivatives/tabulated_model/one_d.py +247 -0
- derivkit/forecast_kit.py +783 -0
- derivkit/forecasting/__init__.py +1 -0
- derivkit/forecasting/dali.py +78 -0
- derivkit/forecasting/expansions.py +486 -0
- derivkit/forecasting/fisher.py +298 -0
- derivkit/forecasting/fisher_gaussian.py +171 -0
- derivkit/forecasting/fisher_xy.py +357 -0
- derivkit/forecasting/forecast_core.py +313 -0
- derivkit/forecasting/getdist_dali_samples.py +429 -0
- derivkit/forecasting/getdist_fisher_samples.py +235 -0
- derivkit/forecasting/laplace.py +259 -0
- derivkit/forecasting/priors_core.py +860 -0
- derivkit/forecasting/sampling_utils.py +388 -0
- derivkit/likelihood_kit.py +114 -0
- derivkit/likelihoods/__init__.py +1 -0
- derivkit/likelihoods/gaussian.py +136 -0
- derivkit/likelihoods/poisson.py +176 -0
- derivkit/utils/__init__.py +13 -0
- derivkit/utils/concurrency.py +213 -0
- derivkit/utils/extrapolation.py +254 -0
- derivkit/utils/linalg.py +513 -0
- derivkit/utils/logger.py +26 -0
- derivkit/utils/numerics.py +262 -0
- derivkit/utils/sandbox.py +74 -0
- derivkit/utils/types.py +15 -0
- derivkit/utils/validate.py +811 -0
- derivkit-1.0.0.dist-info/METADATA +50 -0
- derivkit-1.0.0.dist-info/RECORD +68 -0
- derivkit-1.0.0.dist-info/WHEEL +5 -0
- derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
- derivkit-1.0.0.dist-info/top_level.txt +1 -0

derivkit/derivatives/adaptive/batch_eval.py

@@ -0,0 +1,179 @@

"""Batch evaluation utilities for derivative estimation.

Evaluate a user function over a 1D grid with optional parallelism and return
a 2D array with consistent shape suitable for downstream polynomial fitting
and diagnostics (e.g., in :class:`adaptive.adaptive_fit.AdaptiveFitDerivative`).
"""

from __future__ import annotations

from collections.abc import Callable
from typing import Any

import numpy as np
from multiprocess import Pool

__all__ = ["eval_function_batch"]


def eval_function_batch(
    function: Callable[[float], Any],
    xs: np.ndarray,
    n_workers: int = 1,
) -> np.ndarray:
    """Evaluate a function over 1D inputs and return a (n_points, n_comp) float array.

    Evaluates ``function(x)`` for each ``x`` in ``xs``. If ``n_workers > 1``,
    uses a ``multiprocess.Pool``; otherwise runs serially. Scalars become a single
    column. For array outputs, this routine coerces to a consistent 2D shape that
    downstream polynomial-fitting code expects.

    Args:
        function: Callable mapping a float to a scalar or array-like. Must be
            picklable if used with multiple processes.
        xs: 1D array of abscissae.
        n_workers: If > 1, evaluate in parallel using ``multiprocess.Pool``.

    Returns:
        np.ndarray: Array of shape ``(n_points, n_comp)`` with dtype ``float``.

    Raises:
        ValueError: If ``xs`` is not 1D or outputs cannot be coerced consistently.

    Examples:
        >>> import numpy as np
        >>> from derivkit.derivatives.adaptive.batch_eval import eval_function_batch
        >>> def f(x):
        ...     return np.array([x, x**2])
        >>> xs = np.linspace(-1.0, 1.0, 5)
        >>> y = eval_function_batch(f, xs)
        >>> y.shape
        (5, 2)
    """
    xs = np.asarray(xs, dtype=float)
    if xs.ndim != 1:
        raise ValueError(
            f"eval_function_batch: xs.ndim must be 1 but is {xs.ndim}."
        )

    ys = (
        _eval_parallel(function, xs, n_workers)
        if n_workers > 1
        else _eval_serial(function, xs)
    )

    # Convert outputs to a consistent 2D float array (n_points × n_outputs).
    y = _coerce_stack(ys, n_points=xs.size)

    if not np.all(np.isfinite(y)):
        pass

    return y


def _eval_serial(
    function: Callable[[float], Any], xs: np.ndarray
) -> list[np.ndarray]:
    """Evaluate a function over points serially.

    Args:
        function: Callable mapping a float to a scalar or array-like.
        xs: 1D array of x-axis points to evaluate.

    Returns:
        list[np.ndarray]: One array per input x (each at least 1D).
    """
    return [np.atleast_1d(function(float(x))) for x in xs]


def _eval_parallel(
    function: Callable[[float], Any],
    xs: np.ndarray,
    n_workers: int,
) -> list[np.ndarray]:
    """Evaluate a function over points in parallel using multiprocess.Pool.

    Falls back to the serial path for tiny workloads or if pool creation/execution fails.

    Args:
        function: Maps a float to a scalar or array-like.
        xs: 1D points on x-axis to evaluate.
        n_workers: Desired number of processes.

    Returns:
        list[np.ndarray]: One 1D array per input, order-preserving.
    """
    if n_workers <= 1:
        return _eval_serial(function, xs)

    # Avoid pool overhead for very small batches.
    n = max(1, min(int(n_workers), int(xs.size)))
    if xs.size < max(8, 2 * n):
        return _eval_serial(function, xs)

    try:
        with Pool(n) as pool:
            ys = pool.map(function, xs.tolist())
    except Exception:
        # Spawn/pickle/start-method issues → graceful serial fallback.
        return _eval_serial(function, xs)

    return [np.atleast_1d(y) for y in ys]


def _coerce_stack(ys: list[np.ndarray], n_points: int) -> np.ndarray:
    """Coerce a list of per-point outputs into an (n_points, n_comp) float array.

    The user function may return scalars or arrays of varying shapes. This routine
    coerces them into a consistent 2D array shape that downstream code expects. The rules are:

    - scalar → column vector
    - 1D → row
    - 2D with a transposed batch (n_comp, n_points) → auto-transpose
    - higher-D → flattened per row

    Args:
        ys: List of arrays, one per input point. Each array is at least 1
            dimensional (scalars become shape (1,)).
        n_points: Number of input points (length of ys).

    Returns:
        np.ndarray: Array of shape (n_points, n_comp) with dtype float.

    Raises:
        ValueError: If outputs cannot be coerced to a consistent shape.
    """
    arr = np.asarray(ys, dtype=float)

    # Common cases fast-path
    if arr.ndim == 1:
        # all scalars
        return arr.reshape(n_points, 1)
    if arr.ndim == 2 and arr.shape[0] == n_points:
        # already (n_points, n_comp)
        return arr
    if arr.ndim == 2 and arr.shape[1] == n_points:
        # likely (n_comp, n_points) → transpose
        return arr.T

    # Fallback: stack row-wise and flatten per sample if needed.
    rows = []
    for y in ys:
        y = np.asarray(y, dtype=float)
        if y.ndim == 0:
            y = y.reshape(1)
        elif y.ndim >= 2:
            y = y.reshape(-1)  # flatten higher-D deterministically
        rows.append(y)
    y = np.vstack(rows)

    # Ensure (n_points, n_comp)
    if y.shape[0] != n_points:
        # If the function accidentally returned (n_comp, n_points), fix it once more.
        if y.shape[1] == n_points and y.shape[0] != n_points:
            y = y.T
        else:
            raise ValueError(
                f"eval_function_batch: cannot coerce outputs to (n_points, n_comp); "
                f"got shape {y.shape} for n_points={n_points}"
            )
    return y
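For orientation, here is a minimal usage sketch of the batch evaluator above. It assumes derivkit 1.0.0, numpy, and multiprocess are installed; scalar_model and vector_model are illustrative helpers, not part of the package.

import numpy as np

from derivkit.derivatives.adaptive.batch_eval import eval_function_batch


def scalar_model(x: float) -> float:
    # Scalar output per point: coerced to a single column.
    return float(np.sin(x))


def vector_model(x: float) -> np.ndarray:
    # Vector output per point: becomes one row per grid point.
    return np.array([x, x**2, np.exp(x)])


if __name__ == "__main__":
    xs = np.linspace(-0.5, 0.5, 16)

    y_scalar = eval_function_batch(scalar_model, xs)  # shape (16, 1)
    y_vector = eval_function_batch(vector_model, xs)  # shape (16, 3)

    # n_workers > 1 uses multiprocess.Pool; tiny batches or pool failures
    # fall back to the serial path, so the result is the same either way.
    y_parallel = eval_function_batch(vector_model, xs, n_workers=2)

    print(y_scalar.shape, y_vector.shape, np.allclose(y_vector, y_parallel))

The main-guard keeps the sketch safe on platforms that spawn worker processes by re-importing the script.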

derivkit/derivatives/adaptive/diagnostics.py

@@ -0,0 +1,325 @@

"""Diagnostics for derivative approximations."""

from __future__ import annotations

from typing import Any, Dict, Optional

import numpy as np
from numpy.typing import NDArray

from derivkit.derivatives.adaptive.polyfit_utils import assess_polyfit_quality
from derivkit.utils.logger import derivkit_logger

__all__ = [
    "format_derivative_diagnostics",
    "print_derivative_diagnostics",
    "make_derivative_diag",
    "fit_is_obviously_bad"
]


def format_derivative_diagnostics(
    diag: Dict[str, Any],
    *,
    meta: Optional[Dict[str, Any]] = None,
    decimals: int = 4,
    max_rows: int = 12,
) -> str:
    """Format derivative diagnostics into a human-readable string.

    Args:
        diag: Diagnostics dictionary as returned by
            :func:`adaptive.diagnostics.make_derivative_diag`.
        meta: Optional metadata dictionary to include in the output.
        decimals: Number of decimal places for floating-point numbers.
        max_rows: Maximum number of rows to display for arrays; larger arrays are truncated.

    Returns:
        A formatted string summarizing the diagnostics.
    """
    if not isinstance(diag, dict):
        return "‹diagnostics unavailable›"

    x = np.asarray(diag.get("x", []), float)
    t = np.asarray(diag.get("t", []), float)
    y = np.asarray(diag.get("y", []), float)
    degree = diag.get("degree", None)

    step_min = step_max = None
    if t.size >= 2:
        dt = np.diff(np.sort(t))
        step_min = float(np.min(dt))
        step_max = float(np.max(dt))
    uniformish = (
        step_min is not None
        and step_max is not None
        and abs(step_max - step_min) <= 1e-12 * max(1.0, step_max)
    )

    with np.printoptions(precision=decimals, suppress=True):
        lines = ["=== Derivative Diagnostics ==="]
        if meta:
            lines.append("Meta:")
            wanted = [
                "x0",
                "order",
                "n_points",
                "spacing",
                "base_abs",
                "spacing_resolved",
                "n_workers",
                "domain",
                "mode",
                "ridge",
            ]
            for k in wanted:
                if k in meta:
                    lines.append(f"  {k}={meta[k]}")
            for k, v in meta.items():
                if k not in wanted:
                    lines.append(f"  {k}={v}")
            lines.append("")

        lines += [
            "Grid:",
            f"  t offsets (preview): {_preview_1d(t, max_rows)}",
            f"  u offsets (preview): {_preview_1d(np.asarray(diag.get('u', []), float), max_rows)}",
            f"  x points (preview): {_preview_1d(x, max_rows)}",
        ]
        if step_min is not None:
            lines.append(
                f"step_min={step_min:.{decimals}g}, "
                f"step_max={step_max:.{decimals}g}, "
                f"uniformish={uniformish}"
            )
        lines.append("")

        lines.append("Samples y (rows correspond to x/t):")
        lines.append(f"{_preview_2d_rows(y, max_rows)}")
        lines.append("")

        lines.append("Fit:")
        lines.append(f"  chosen degree(s): {degree}")
        rrms = diag.get("rrms", None)
        if rrms is not None:
            lines.append(f"  rrms: {rrms}")

        fq = diag.get("fit_quality", None)
        fs = diag.get("fit_suggestions", None)
        if isinstance(fq, dict):
            lines.append("")
            lines.append("Fit quality:")
            lines.append(
                "  rrms_rel={:.2e}, loo_rel={:.2e}, cond_vdm={:.2e}, deriv_rel={:.2e}".format(
                    fq.get("rrms_rel", float("nan")),
                    fq.get("loo_rel", float("nan")),
                    fq.get("cond_vdm", float("nan")),
                    fq.get("deriv_rel", float("nan")),
                )
            )
            th = fq.get("thresholds", {})
            if not isinstance(th, dict):
                th = {}
            lines.append(
                "  thresholds: rrms_rel={:.1e}, loo_rel={:.1e}, cond_vdm={:.1e}, deriv_rel={:.1e}".format(
                    th.get("rrms_rel", float("nan")),
                    th.get("loo_rel", float("nan")),
                    th.get("cond_vdm", float("nan")),
                    th.get("deriv_rel", float("nan")),
                )
            )
        if isinstance(fs, (list, tuple)) and len(fs) > 0:
            lines.append("  suggestions:")
            for s in fs:
                lines.append(f"    - {s}")

    return "\n".join(lines)


def print_derivative_diagnostics(
    diag: Dict[str, Any], *, meta: Optional[Dict[str, Any]] = None
) -> None:
    """Print derivative diagnostics via the derivkit logger.

    Args:
        diag: Diagnostics dictionary as returned by
            :func:`adaptive.diagnostics.make_derivative_diag`.
        meta: Optional metadata dictionary to include in the output.

    Returns:
        None
    """
    derivkit_logger.info(format_derivative_diagnostics(diag, meta=meta))


def _preview_1d(a: np.ndarray, max_rows: int) -> np.ndarray:
    """Return a preview of a 1D array, truncating with NaN if too long.

    Args:
        a: Input 1D array.
        max_rows: Maximum number of rows to display.

    Returns:
        A 1D array with at most ``max_rows`` elements, with NaN in the middle if truncated.
    """
    a = np.asarray(a)
    if a.ndim != 1 or a.size <= max_rows:
        return a
    k = max_rows // 2
    return np.concatenate([a[:k], np.array([np.nan]), a[-k:]])


def _preview_2d_rows(a: np.ndarray, max_rows: int) -> np.ndarray:
    """Return a preview of a 2D array by rows.

    The preview is truncated with a NaN row if there are too many elements.

    Args:
        a: Input 2D array.
        max_rows: Maximum number of rows to display.

    Returns:
        A 2D array with at most ``max_rows`` rows, with a NaN row in the middle if truncated.
    """
    a = np.asarray(a)
    if a.ndim != 2 or a.shape[0] <= max_rows:
        return a
    k = max_rows // 2
    return np.vstack([a[:k], np.full((1, a.shape[1]), np.nan), a[-k:]])


def make_derivative_diag(
    *,
    x: np.ndarray,
    t: np.ndarray,
    u: np.ndarray,
    y: np.ndarray,
    degree: int | list[int],
    spacing_resolved: float | None = None,
    rrms: Optional[NDArray[np.floating]] = None,
    coeffs: Optional[NDArray[np.floating]] = None,
    ridge: float | None = None,
    order: int | None = None,
) -> dict:
    """Build lightweight diagnostics for a local polynomial derivative fit.

    This assembles the core quantities used in plotting/printing diagnostics and,
    when enough inputs are provided, augments them with polynomial-fit quality
    metrics and human-readable suggestions.

    Args:
        x: Absolute sample locations, shape ``(n_points,)`` where ``n_points`` is the
            number of grid points where the function was evaluated.
        t: Offsets relative to ``x0`` (``t = x - x0``), shape ``(n_points,)``.
        u: Scaled offsets used in the polynomial basis (typically ``u = t / s``),
            shape ``(n_points,)``.
        y: Function evaluations at ``x``, shape ``(n_points, n_observables)``.
        degree: Final polynomial degree used. May be an ``int`` or a per-observable
            list of ints (length ``n_observables``).
        spacing_resolved: Resolved spacing descriptor for the default grid (numeric
            half-width or ``None`` if not applicable).
        rrms: Relative RMS residuals of the fit, shape ``(n_observables,)`` (optional).
        coeffs: Polynomial coefficients in the scaled basis, shape ``(deg+1, n_observables)`` (optional).
        ridge: Ridge regularization strength used in the fit (optional).
        order: Derivative order of interest (optional).

    Returns:
        dict: A plain dictionary with fields suited for logging/printing/plotting.

        Always present:
            - ``"x"`` : ``np.ndarray`` with shape ``(n_points,)``
            - ``"t"`` : ``np.ndarray`` with shape ``(n_points,)``
            - ``"u"`` : ``np.ndarray`` with shape ``(n_points,)``
            - ``"y"`` : ``np.ndarray`` with shape ``(n_points, n_observables)``
            - ``"degree"`` : ``int`` or ``list[int]``

        Included when available:
            - ``"spacing_resolved"`` : ``float | None``
            - ``"rrms"`` : ``np.ndarray`` or ``float``

        Included when quality inputs are provided (``coeffs``, ``ridge``, ``order``):
            - ``"fit_quality"`` : ``dict`` with keys like ``"rrms_rel"``, ``"loo_rel"``,
              ``"cond_vdm"``, ``"deriv_rel"``, and a nested ``"thresholds"`` dict.
            - ``"fit_suggestions"`` : ``list[str]`` with human-readable hints.
    """
    out: Dict[str, Any] = {
        "x": x,
        "t": t,
        "u": u,
        "y": y,
        "degree": degree,
    }
    if spacing_resolved is not None:
        out["spacing_resolved"] = float(spacing_resolved)
    if rrms is not None:
        out["rrms"] = rrms if not (rrms.ndim == 1 and rrms.size == 1) else float(rrms[0])

    have_quality_args = (
        coeffs is not None
        and ridge is not None
        and order is not None
    )
    if have_quality_args:
        metrics, suggestions = assess_polyfit_quality(
            u=u,
            y=y,
            coeffs=coeffs,
            deg=(degree if isinstance(degree, int) else int(degree[0])),
            ridge=float(ridge),
            order=int(order),
        )
        out["fit_quality"] = metrics
        out["fit_suggestions"] = suggestions

    return out


def fit_is_obviously_bad(metrics: dict) -> tuple[bool, str]:
    """Heuristically flag a clearly unstable polynomial fit and return a brief reason.

    This inspects scalar diagnostics (from ``assess_polyfit_quality``) against amplified
    thresholds. If any metric is far beyond its nominal limit, the fit is flagged.

    Args:
        metrics: Dictionary with keys like:
            - ``"rrms_rel"``, ``"loo_rel"``, ``"cond_vdm"``, ``"deriv_rel"``
            - ``"thresholds"`` : ``dict`` of nominal per-metric thresholds

    Returns:
        tuple[bool, str]: ``(is_bad, message)`` where:
            - ``is_bad`` is ``True`` if any metric exceeds ten times its nominal
              threshold (subject to an absolute floor per metric), otherwise ``False``.
            - ``message`` is a short human-readable summary when ``is_bad`` is ``True``,
              otherwise ``""``.

    Notes:
        This is a soft, non-fatal screen for diagnostics/logging. Callers decide how to
        react (warn, rebuild grid, widen spacing, add samples, increase ridge, etc.).
    """
    th = metrics.get("thresholds", {})

    rrms_rel = float(metrics.get("rrms_rel", 0.0))
    loo_rel = float(metrics.get("loo_rel", 0.0))
    cond_vdm = float(metrics.get("cond_vdm", 0.0))
    deriv_rel = float(metrics.get("deriv_rel", 0.0))

    # Only mark as bad when we're off by orders of magnitude vs nominal.
    is_bad = (
        rrms_rel > max(10.0 * th.get("rrms_rel", 1e-3), 1e-2)
        or loo_rel > max(10.0 * th.get("loo_rel", 1e-3), 1e-2)
        or cond_vdm > max(10.0 * th.get("cond_vdm", 1e8), 1e12)
        or deriv_rel > max(10.0 * th.get("deriv_rel", 5e-3), 0.1)
    )

    if not is_bad:
        return False, ""

    msg = (
        "Adaptive polynomial fit at this point looks numerically unstable. "
        f"(rrms_rel={rrms_rel:.2e}, "
        f"loo_rel={loo_rel:.2e}, "
        f"cond_vdm={cond_vdm:.2e}, "
        f"deriv_rel={deriv_rel:.2e})"
    )
    return True, msg
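A minimal sketch of how the diagnostics helpers above fit together, assuming derivkit 1.0.0 is installed; the grid, degree, and metric values below are made up for illustration.

import numpy as np

from derivkit.derivatives.adaptive.diagnostics import (
    fit_is_obviously_bad,
    format_derivative_diagnostics,
    make_derivative_diag,
)

# Hypothetical grid around x0 = 1.0 and a single observable y = x**2.
x0 = 1.0
t = np.linspace(-0.1, 0.1, 5)      # offsets from x0
x = x0 + t                         # absolute sample locations
u = t / 0.1                        # offsets scaled to the fit basis
y = (x**2).reshape(-1, 1)          # (n_points, n_observables)

diag = make_derivative_diag(
    x=x, t=t, u=u, y=y,
    degree=2,
    spacing_resolved=0.1,
    rrms=np.array([3.2e-9]),       # size-1 arrays are stored as a scalar
)
print(format_derivative_diagnostics(diag, meta={"x0": x0, "order": 1}))

# fit_is_obviously_bad flags metrics far beyond their nominal thresholds.
metrics = {
    "rrms_rel": 0.5, "loo_rel": 0.4, "cond_vdm": 1e14, "deriv_rel": 0.3,
    "thresholds": {"rrms_rel": 1e-3, "loo_rel": 1e-3,
                   "cond_vdm": 1e8, "deriv_rel": 5e-3},
}
is_bad, reason = fit_is_obviously_bad(metrics)
print(is_bad, reason)

Because coeffs, ridge, and order are not passed here, make_derivative_diag skips the assess_polyfit_quality step and only assembles the core fields.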