derivkit-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. derivkit/__init__.py +22 -0
  2. derivkit/calculus/__init__.py +17 -0
  3. derivkit/calculus/calculus_core.py +152 -0
  4. derivkit/calculus/gradient.py +97 -0
  5. derivkit/calculus/hessian.py +528 -0
  6. derivkit/calculus/hyper_hessian.py +296 -0
  7. derivkit/calculus/jacobian.py +156 -0
  8. derivkit/calculus_kit.py +128 -0
  9. derivkit/derivative_kit.py +315 -0
  10. derivkit/derivatives/__init__.py +6 -0
  11. derivkit/derivatives/adaptive/__init__.py +5 -0
  12. derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
  13. derivkit/derivatives/adaptive/batch_eval.py +179 -0
  14. derivkit/derivatives/adaptive/diagnostics.py +325 -0
  15. derivkit/derivatives/adaptive/grid.py +333 -0
  16. derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
  17. derivkit/derivatives/adaptive/spacing.py +66 -0
  18. derivkit/derivatives/adaptive/transforms.py +245 -0
  19. derivkit/derivatives/autodiff/__init__.py +1 -0
  20. derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
  21. derivkit/derivatives/autodiff/jax_core.py +217 -0
  22. derivkit/derivatives/autodiff/jax_utils.py +146 -0
  23. derivkit/derivatives/finite/__init__.py +5 -0
  24. derivkit/derivatives/finite/batch_eval.py +91 -0
  25. derivkit/derivatives/finite/core.py +84 -0
  26. derivkit/derivatives/finite/extrapolators.py +511 -0
  27. derivkit/derivatives/finite/finite_difference.py +247 -0
  28. derivkit/derivatives/finite/stencil.py +206 -0
  29. derivkit/derivatives/fornberg.py +245 -0
  30. derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
  31. derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
  32. derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
  33. derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
  34. derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
  35. derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
  36. derivkit/derivatives/tabulated_model/__init__.py +1 -0
  37. derivkit/derivatives/tabulated_model/one_d.py +247 -0
  38. derivkit/forecast_kit.py +783 -0
  39. derivkit/forecasting/__init__.py +1 -0
  40. derivkit/forecasting/dali.py +78 -0
  41. derivkit/forecasting/expansions.py +486 -0
  42. derivkit/forecasting/fisher.py +298 -0
  43. derivkit/forecasting/fisher_gaussian.py +171 -0
  44. derivkit/forecasting/fisher_xy.py +357 -0
  45. derivkit/forecasting/forecast_core.py +313 -0
  46. derivkit/forecasting/getdist_dali_samples.py +429 -0
  47. derivkit/forecasting/getdist_fisher_samples.py +235 -0
  48. derivkit/forecasting/laplace.py +259 -0
  49. derivkit/forecasting/priors_core.py +860 -0
  50. derivkit/forecasting/sampling_utils.py +388 -0
  51. derivkit/likelihood_kit.py +114 -0
  52. derivkit/likelihoods/__init__.py +1 -0
  53. derivkit/likelihoods/gaussian.py +136 -0
  54. derivkit/likelihoods/poisson.py +176 -0
  55. derivkit/utils/__init__.py +13 -0
  56. derivkit/utils/concurrency.py +213 -0
  57. derivkit/utils/extrapolation.py +254 -0
  58. derivkit/utils/linalg.py +513 -0
  59. derivkit/utils/logger.py +26 -0
  60. derivkit/utils/numerics.py +262 -0
  61. derivkit/utils/sandbox.py +74 -0
  62. derivkit/utils/types.py +15 -0
  63. derivkit/utils/validate.py +811 -0
  64. derivkit-1.0.0.dist-info/METADATA +50 -0
  65. derivkit-1.0.0.dist-info/RECORD +68 -0
  66. derivkit-1.0.0.dist-info/WHEEL +5 -0
  67. derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
  68. derivkit-1.0.0.dist-info/top_level.txt +1 -0
derivkit/derivative_kit.py
@@ -0,0 +1,315 @@
+ """Provides the DerivativeKit API.
+
+ The ``DerivativeKit`` class is a lightweight front end over DerivKit’s derivative engines.
+ You provide the function to differentiate and the expansion point `x0`,
+ then choose a backend by name (e.g., ``"adaptive"`` or ``"finite"``).
+
+ Examples:
+ ---------
+ Basic usage:
+
+ >>> import numpy as np
+ >>> from derivkit.derivative_kit import DerivativeKit
+ >>> dk = DerivativeKit(function=np.cos, x0=1.0)
+ >>> dk.differentiate(method="adaptive", order=1)  # doctest: +SKIP
+
+ Using tabulated data directly:
+
+ >>> import numpy as np
+ >>> from derivkit.derivative_kit import DerivativeKit
+ >>>
+ >>> x_tab = np.array([0.0, 1.0, 2.0, 3.0])
+ >>> y_tab = x_tab**2
+ >>> dk = DerivativeKit(x0=0.5, tab_x=x_tab, tab_y=y_tab)
+ >>> dk.differentiate(order=1, method="finite", extrapolation="ridders")  # doctest: +SKIP
+
+ Listing built-in aliases:
+
+ >>> from derivkit.derivative_kit import available_methods
+ >>> available_methods()  # doctest: +SKIP
+
+ Adding methods:
+ ---------------
+ New engines can be registered without modifying this class by calling
+ :func:`derivkit.derivative_kit.register_method` (see example below).
+
+ Registering a new method:
+
+ >>> from derivkit.derivative_kit import register_method  # doctest: +SKIP
+ >>> from derivkit.some_new_method import NewMethodDerivative  # doctest: +SKIP
+ >>> register_method(  # doctest: +SKIP
+ ...     name="new-method",
+ ...     cls=NewMethodDerivative,
+ ...     aliases=("new_method", "nm"),
+ ... )  # doctest: +SKIP
+ """
+
+ from __future__ import annotations
+
+ import re
+ from functools import lru_cache
+ from typing import Any, Callable, Iterable, Mapping, Protocol, Type
+
+ import numpy as np
+ from numpy.typing import ArrayLike
+
+ from derivkit.derivatives.adaptive.adaptive_fit import AdaptiveFitDerivative
+ from derivkit.derivatives.finite.finite_difference import (
+     FiniteDifferenceDerivative,
+ )
+ from derivkit.derivatives.fornberg import FornbergDerivative
+ from derivkit.derivatives.local_polynomial_derivative.local_polynomial_derivative import (
+     LocalPolynomialDerivative,
+ )
+ from derivkit.derivatives.tabulated_model.one_d import Tabulated1DModel
+
+
+ class DerivativeEngine(Protocol):
+     """Protocol each derivative engine must satisfy.
+
+     This defines the minimal interface expected by DerivKit’s derivative
+     backends. Any class registered as a derivative engine must be
+     constructible with a target function ``function`` and an expansion
+     point ``x0``, and must provide a ``.differentiate(...)`` method that
+     performs the actual derivative computation. It serves only as a
+     structural type check (similar to an abstract base class) and carries
+     no runtime behavior. In other words, this is a template for derivative
+     engine implementations.
+     """
+     def __init__(self, function: Callable[[float], Any], x0: float):
+         """Initialize the engine with a target function and expansion point."""
+         ...
+     def differentiate(self, *args: Any, **kwargs: Any) -> Any:
+         """Compute the derivative using the engine’s algorithm."""
+         ...
+
+
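For illustration only, a minimal engine satisfying this protocol might look like the sketch below; the class name, step size, and central-difference formula are hypothetical and not part of the wheel, and only the protocol above and the register_method function defined later in this module are taken from the package.

    class CentralDifferenceEngine:
        """Hypothetical engine: bare-bones first-order central difference at x0."""

        def __init__(self, function, x0: float):
            self.function = function
            self.x0 = float(x0)

        def differentiate(self, order: int = 1, h: float = 1e-5, **kwargs):
            # Sketch only handles the first derivative via a symmetric stencil.
            if order != 1:
                raise NotImplementedError("sketch only handles order=1")
            return (self.function(self.x0 + h) - self.function(self.x0 - h)) / (2.0 * h)

    # It could then be registered so DerivativeKit(...).differentiate(method="central") resolves to it:
    # register_method(name="central", cls=CentralDifferenceEngine, aliases=("cd",))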
+ # These are the built-in derivative methods available by default.
+ # For each method we allow up to five aliases:
+ # - 3 obvious spelling / punctuation variants
+ # - 2 common short-hands users are likely to type
+ # This keeps the interface flexible without bloating the lookup table
+ # or introducing ambiguous scheme-level names.
+ _METHOD_SPECS: list[tuple[str, Type[DerivativeEngine], list[str]]] = [
+     ("adaptive", AdaptiveFitDerivative, ["adaptive-fit", "adaptive_fit", "ad", "adapt"]),
+     ("finite", FiniteDifferenceDerivative, ["finite-difference", "finite_difference", "fd", "findiff", "finite_diff"]),
+     ("local_polynomial", LocalPolynomialDerivative, ["local-polynomial", "local_polynomial", "lp", "localpoly", "local_poly"]),
+     ("fornberg", FornbergDerivative, ["fb", "forn", "fornberg-fd", "fornberg_fd", "fornberg_weights"]),
+ ]
+
+
+ def _norm(s: str) -> str:
+     """Normalize a method string for robust matching (case/spacing/punct insensitive).
+
+     Args:
+         s: Input string.
+
+     Returns:
+         Normalized string.
+     """
+     return re.sub(r"[^a-z0-9]+", "", s.lower())
+
+
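As a worked illustration of the normalization above (the values shown are what re.sub with that pattern produces, not output copied from the package):

    _norm("Finite-Difference")   # -> "finitedifference"
    _norm("finite_difference")   # -> "finitedifference"
    _norm("FD")                  # -> "fd"

so differently punctuated or capitalized spellings of the same alias collapse to a single lookup key before the table built below is consulted.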
+ @lru_cache(maxsize=1)
+ def _method_maps() -> tuple[Mapping[str, Type[DerivativeEngine]], tuple[str, ...]]:
+     """Construct and cache lookup tables for derivative methods.
+
+     This function builds the internal mappings that link user-provided method
+     names (and their aliases) to the corresponding derivative engine classes.
+     It also records the canonical method names used for display in help and
+     error messages. The result is cached after the first call for efficiency.
+     Caching means that any changes to the registered methods (via
+     ``register_method``) will not be reflected until the cache is cleared.
+
+     Returns:
+         A pair ``(method_map, canonical_names)`` where:
+
+         - ``method_map`` maps normalized names and aliases to engine classes.
+         - ``canonical_names`` lists the sorted canonical method names.
+     """
+     method_map: dict[str, Type[DerivativeEngine]] = {}
+     canonical: set[str] = set()
+     for name, cls, aliases in _METHOD_SPECS:
+         k = _norm(name)
+         method_map[k] = cls
+         canonical.add(k)
+         for a in aliases:
+             method_map[_norm(a)] = cls
+     return method_map, tuple(sorted(canonical))
+
+
+ def register_method(
+     name: str,
+     cls: Type[DerivativeEngine],
+     *,
+     aliases: Iterable[str] = (),
+ ) -> None:
+     """Register a new derivative method.
+
+     Adds a new derivative engine that can be referenced by name in
+     :class:`derivkit.derivative_kit.DerivativeKit`. This function can be called
+     from anywhere in the package (for example, inside a submodule’s
+     ``__init__.py``) and is safe regardless of import order. The internal cache
+     is automatically cleared and rebuilt on the next lookup.
+
+     Args:
+         name: Canonical public name of the method (e.g., ``"gp"``).
+         cls: Engine class implementing the
+             :class:`derivkit.derivative_kit.DerivativeEngine` protocol.
+         aliases: Additional accepted spellings (e.g., ``"gaussian-process"``).
+
+     Registering a new method:
+
+     >>> from derivkit.derivative_kit import register_method  # doctest: +SKIP
+     >>> from derivkit.some_new_method import NewMethodDerivative  # doctest: +SKIP
+     >>> register_method(  # doctest: +SKIP
+     ...     name="new-method",
+     ...     cls=NewMethodDerivative,
+     ...     aliases=("new_method", "nm"),
+     ... )  # doctest: +SKIP
+     """
+     _METHOD_SPECS.append((name, cls, list(aliases)))
+     _method_maps.cache_clear()
+
+
+ def _resolve(method: str) -> Type[DerivativeEngine]:
+     """Resolve a user-provided method name or alias to an engine class.
+
+     Args:
+         method: User-provided method name or alias.
+
+     Returns:
+         Corresponding derivative engine class.
+     """
+     method_map, canon = _method_maps()
+     try:
+         return method_map[_norm(method)]
+     except KeyError:
+         opts = ", ".join(canon)
+         raise ValueError(f"Unknown derivative method '{method}'. Choose one of {{{opts}}}.") from None
+
+
+ class DerivativeKit:
+     """Unified interface for computing numerical derivatives.
+
+     The class provides a simple way to evaluate derivatives using any of
+     DerivKit’s available backends (e.g., adaptive fit or finite difference).
+     By default, the adaptive-fit method is used.
+
+     You can supply either a function and ``x0``, or tabulated ``tab_x``/``tab_y``
+     arrays together with ``x0`` when differentiating tabulated data.
+     The chosen backend is invoked when you call the ``.differentiate()`` method.
+
+     Example:
+         >>> import numpy as np
+         >>> from derivkit.derivative_kit import DerivativeKit
+         >>> dk = DerivativeKit(np.cos, x0=1.0)
+         >>> deriv = dk.differentiate(order=1)  # uses the default "adaptive" method
+
+     Attributes:
+         function: The callable to differentiate.
+         x0: The point or points at which the derivative is evaluated.
+         default_method: The backend used when no method is specified.
+     """
+
+     def __init__(
+         self,
+         function: Callable[[float | np.ndarray], Any] | None = None,
+         x0: float | np.ndarray | None = None,
+         *,
+         tab_x: ArrayLike | None = None,
+         tab_y: ArrayLike | None = None,
+     ) -> None:
+         """Initializes the DerivativeKit with a target function and expansion point.
+
+         Args:
+             function: The function to be differentiated. Must accept a single float
+                 and return a scalar or array-like output.
+             x0: Point or array of points at which to evaluate the derivative.
+             tab_x: Optional tabulated x values for creating a
+                 :class:`tabulated_model.one_d.Tabulated1DModel`.
+             tab_y: Optional tabulated y values for creating a
+                 :class:`tabulated_model.one_d.Tabulated1DModel`.
+         """
+         # Enforce "either function or tabulated", not both.
+         if function is not None and (tab_x is not None or tab_y is not None):
+             raise ValueError("Pass either `function` or (`tab_x`, `tab_y`), not both.")
+
+         if function is not None:
+             self.function = function
+
+         elif tab_x is not None or tab_y is not None:
+             if tab_x is None or tab_y is None:
+                 raise ValueError("Both `tab_x` and `tab_y` must be provided for tabulated mode.")
+             model = Tabulated1DModel(tab_x, tab_y)
+             self.function = model
+
+         else:
+             raise ValueError("Need either `function` or (`tab_x`, `tab_y`).")
+
+         if x0 is None:
+             raise ValueError("`x0` must be provided.")
+         self.x0 = x0
+         self.default_method = "adaptive"
+
+     def differentiate(
+         self,
+         *,
+         method: str | None = None,
+         **kwargs: Any,
+     ) -> Any:
+         """Compute derivatives using the chosen method.
+
+         Forwards all keyword arguments to the engine’s ``.differentiate()``.
+
+         Args:
+             method: Method name or alias (e.g., ``"adaptive"``, ``"finite"``,
+                 ``"fd"``). Default is ``"adaptive"``.
+             **kwargs: Passed through to the chosen engine.
+
+         Returns:
+             The derivative result from the underlying engine.
+
+             If ``x0`` is a single value, returns the usual derivative output.
+
+             If ``x0`` is an array of points, returns an array where the first
+             dimension indexes the points in ``x0``. For example, if you pass
+             5 points and each derivative has shape ``(2, 3)``, the result has
+             shape ``(5, 2, 3)``.
+
+         Notes:
+             Thread-level parallelism across derivative evaluations can be
+             controlled by passing ``n_workers`` via ``**kwargs``. Note that
+             this does not launch separate Python processes. All work occurs
+             within a single process using worker threads.
+
+         Raises:
+             ValueError: If ``method`` is not recognized.
+         """
+         chosen = method or self.default_method  # use default if None
+         Engine = _resolve(chosen)
+
+         x0_arr = np.asarray(self.x0)
+
+         # scalar x0
+         if x0_arr.ndim == 0:
+             return Engine(self.function, float(x0_arr)).differentiate(**kwargs)
+
+         # array of x0 values
+         results = []
+         for xi in x0_arr.ravel():
+             res = Engine(self.function, float(xi)).differentiate(**kwargs)
+             results.append(res)
+
+         return np.stack(results, axis=0).reshape(
+             x0_arr.shape + np.shape(results[0])
+         )
+
+
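To make the stacking behaviour described in the docstring concrete, here is a small sketch; the vector-valued toy function is invented for illustration and is not part of the package. With three evaluation points and a length-2 output per derivative, the stacked result is expected to have shape (3, 2), one row per entry of x0.

    import numpy as np
    from derivkit.derivative_kit import DerivativeKit

    def model(x):
        # toy vector-valued function: returns two components per input point
        return np.array([np.sin(x), x**2])

    dk = DerivativeKit(model, x0=np.array([0.5, 1.0, 1.5]))
    derivs = dk.differentiate(order=1)  # expected shape: (3, 2)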
+ def available_methods() -> dict[str, list[str]]:
+     """Lists derivative methods exposed by this API, including aliases.
+
+     Returns:
+         Dict mapping canonical method name -> list of accepted aliases.
+     """
+     return {name: list(aliases) for name, _, aliases in _METHOD_SPECS}
derivkit/derivatives/__init__.py
@@ -0,0 +1,6 @@
+ """Derivative calculation module.
+
+ This module provides various methods for calculating derivatives of functions,
+ including finite difference methods, polynomial interpolation, and
+ automatic differentiation using JAX.
+ """
derivkit/derivatives/adaptive/__init__.py
@@ -0,0 +1,5 @@
+ """Adaptive numerical differentiation utilities.
+
+ Provides batch evaluation, spacing policies, polynomial fitting helpers,
+ and an adaptive single-fit derivative estimator for scalar and vector outputs.
+ """
derivkit/derivatives/adaptive/adaptive_fit.py
@@ -0,0 +1,238 @@
+ """Adaptive polynomial-fit method for estimating derivatives from function samples spaced around x0."""
+
+ from __future__ import annotations
+
+ import numpy as np
+
+ from derivkit.derivatives.adaptive.batch_eval import eval_function_batch
+ from derivkit.derivatives.adaptive.diagnostics import (
+     fit_is_obviously_bad,
+     make_derivative_diag,
+     print_derivative_diagnostics,
+ )
+ from derivkit.derivatives.adaptive.grid import (
+     ensure_min_samples_and_maybe_rebuild,
+     make_domain_aware_chebyshev_grid,
+ )
+ from derivkit.derivatives.adaptive.polyfit_utils import (
+     assess_polyfit_quality,
+     fit_with_headroom_and_maybe_minimize,
+     pullback_derivative_from_fit,
+     scale_offsets,
+ )
+ from derivkit.utils.logger import derivkit_logger
+
+
+ class AdaptiveFitDerivative:
+     """Derivative estimation via a single local polynomial fit around x0."""
+
+     def __init__(self, func, x0: float):
+         """Initialize the estimator.
+
+         Args:
+             func: Callable mapping a float to a scalar or 1D array-like output.
+             x0: Expansion point about which derivatives are computed.
+         """
+         self.func = func
+         self.x0 = float(x0)
+
+     def differentiate(
+         self,
+         order: int,
+         *,
+         n_points: int = 10,
+         spacing: float | str | None = "auto",
+         base_abs: float | None = None,
+         n_workers: int = 1,
+         grid: tuple[str, np.ndarray] | None = None,  # ('offsets'|'absolute', array)
+         domain: "tuple[float | None, float | None] | None" = None,
+         ridge: float = 0.0,
+         return_error: bool = False,
+         diagnostics: bool = False,
+         meta: dict | None = None,
+     ):
+         """Compute the derivative of specified order at x0 using an adaptive polynomial fit.
+
+         Sampling strategy:
+             - grid=None: symmetric Chebyshev offsets around x0 with half-width from `spacing`.
+             - grid=("offsets", arr): explicit offsets t; samples at x = x0 + t (0 inserted if missing).
+             - grid=("absolute", arr): explicit absolute x positions; samples at x = arr.
+
+         Args:
+             order: Derivative order (>=1).
+             n_points: Number of sample points when building the default grid. Default is 10.
+             spacing: Scale for the default symmetric grid around ``x0`` (ignored when ``grid`` is provided).
+
+                 Accepted forms:
+
+                 - float: interpreted as an absolute half-width ``h``; samples in ``[x0 - h, x0 + h]``.
+                 - "<pct>%": percentage string; ``h`` is that fraction of a local scale
+                   set by ``abs(x0)`` with a floor ``base_abs`` near zero.
+                 - "auto": choose ``h`` adaptively. DerivKit picks a half-width based on
+                   the local scale of ``x0`` with a minimum of ``base_abs``; if ``domain``
+                   is given, the interval is clipped to stay inside ``(lo, hi)``. The
+                   default grid uses Chebyshev nodes on that interval and always includes
+                   the center point.
+
+             base_abs: Absolute spacing floor used by "auto"/percentage near x0≈0. Defaults to ``1e-3`` if not set.
+             n_workers: Parallel workers for batched function evals (1 = serial).
+             grid: Either ("offsets", array) or ("absolute", array), or None for default.
+
+                 This lets the user supply their own sampling points instead of using the
+                 automatically built Chebyshev grid. With ``("offsets", arr)``, the array
+                 gives relative offsets from ``x0`` (samples at ``x = x0 + t``). With
+                 ``("absolute", arr)``, the array gives absolute ``x`` positions. If
+                 ``None``, the method builds a symmetric default grid around ``x0``.
+
+             domain: Optional (lo, hi) used to trigger domain-aware transforms in default mode.
+             ridge: Ridge regularization for polynomial fit. Defaults to 0.0.
+
+                 This term adds a small penalty to the fit to keep the coefficients from
+                 becoming too large when the Vandermonde matrix is nearly singular.
+                 Increasing ``ridge`` makes the fit more stable but slightly smoother;
+                 setting it to 0 disables the regularization.
+
+             return_error: If True, also returns an error proxy based on the RMS residual
+                 of the polynomial fit (same shape as the derivative for multi-component outputs).
+
+             diagnostics: If True, return (derivative, diagnostics_dict).
+             meta: Extra metadata to carry in diagnostics.
+
+         Returns:
+             The derivative estimate at x0, optionally accompanied by an error proxy and/or
+             a diagnostics dictionary depending on the flags:
+
+             - If ``return_error`` is False and ``diagnostics`` is False:
+               returns ``deriv``.
+             - If ``return_error`` is True and ``diagnostics`` is False:
+               returns ``(deriv, err)``.
+             - If ``return_error`` is False and ``diagnostics`` is True:
+               returns ``(deriv, diag)``.
+             - If both ``return_error`` and ``diagnostics`` are True:
+               returns ``(deriv, err, diag)``.
+
+         Raises:
+             ValueError: If inputs are invalid or not enough samples are provided.
+         """
+         if order < 1:
+             raise ValueError("order must be >= 1")
+
+         # 1) Choose sample locations (x, t)
+         if grid is not None:
+             if not (isinstance(grid, tuple) and len(grid) == 2 and isinstance(grid[0], str)):
+                 raise ValueError("grid must be ('offsets'|'absolute', numpy_array) or None.")
+             kind, arr = grid
+             arr = np.asarray(arr, dtype=float).ravel()
+             match kind:
+                 case "offsets":
+                     t = np.sort(np.unique(np.append(arr, 0.0)))  # ensure center; sorted for stability
+                     x = self.x0 + t
+                 case "absolute":
+                     x = np.sort(arr)
+                     t = x - self.x0
+                 case _:
+                     raise ValueError("grid kind must be 'offsets' or 'absolute'.")
+             mode, spacing_resolved, sign_used = "x", float("nan"), None
+         else:
+             mode, x, t, spacing_resolved, sign_used = make_domain_aware_chebyshev_grid(
+                 self.x0,
+                 n_points=n_points,
+                 spacing=spacing,
+                 base_abs=base_abs,
+                 domain=domain,
+                 max_cheby_points=30,
+             )
+
+         # 1b) Ensure enough samples (rebuild default Chebyshev grids if needed)
+         mode, x, t, spacing_resolved, sign_used = ensure_min_samples_and_maybe_rebuild(
+             mode=mode,
+             x=x,
+             t=t,
+             spacing_resolved=spacing_resolved,
+             sign_used=sign_used,
+             x0=self.x0,
+             order=order,
+             n_points=n_points,
+             spacing=spacing,
+             base_abs=base_abs,
+             max_cheby_points=30,
+         )
+
+         # 2) Evaluate function on the grid
+         ys = eval_function_batch(self.func, x, n_workers=n_workers)
+         if ys.ndim == 1:
+             ys = ys[:, None]
+         n_components = ys.shape[1]
+
+         # 3) Polynomial fit (scaled offsets) with headroom + optional minimal-degree swap
+         u, factor = scale_offsets(t)
+         coeffs, rrms, deg = fit_with_headroom_and_maybe_minimize(
+             u, ys, order=order, mode=mode, ridge=ridge, factor=factor
+         )
+
+         # 3b) Fit quality (soft warnings only)
+         metrics, suggestions = assess_polyfit_quality(
+             u, ys, coeffs, deg, ridge=ridge, factor=factor, order=order
+         )
+         bad, msg = fit_is_obviously_bad(metrics)
+         if bad:
+             pretty_suggestions = "\n ".join(suggestions)
+             derivkit_logger.info(
+                 msg
+                 + "\nTo improve this derivative, try:\n "
+                 + pretty_suggestions
+             )
+
+         # 4) Derivative (mode-aware pullback)
+         deriv = pullback_derivative_from_fit(
+             mode=mode,
+             order=order,
+             coeffs=coeffs,
+             factor=factor,
+             x0=self.x0,
+             sign_used=sign_used,
+         )
+         out = deriv.item() if n_components == 1 else deriv
+
+         # error proxy: rrms is just a rough uncertainty indicator from the fit residual.
+         if np.isscalar(rrms) or np.ndim(rrms) == 0:
+             err = float(rrms)
+         else:
+             err = rrms
+
+         if not diagnostics:
+             return (out, err) if return_error else out
+
+         # 5) Diagnostics (optional)
+         degree_out = int(deg) if n_components == 1 else [int(deg)] * n_components
+         diag = make_derivative_diag(
+             x=x,
+             t=t,
+             u=u,
+             y=ys,
+             degree=degree_out,
+             spacing_resolved=spacing_resolved,
+             rrms=rrms,
+             coeffs=coeffs,
+             ridge=ridge,
+             order=order,
+         )
+         meta_payload = {
+             "x0": self.x0,
+             "order": order,
+             "n_points": len(x),
+             "spacing": spacing,
+             "base_abs": base_abs,
+             "spacing_resolved": spacing_resolved,
+             "n_workers": n_workers,
+             "domain": domain,
+             "mode": mode,
+             "ridge": ridge,
+             **(meta or {}),
+         }
+         print_derivative_diagnostics(diag, meta=meta_payload)
+         diag_out = {**diag, "x0": self.x0, "meta": meta_payload}
+
+         if return_error:
+             return out, err, diag_out
+         return out, diag_out
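As a usage sketch of the return combinations and the explicit-grid option documented in the docstring above: np.cos, the offsets array, and the chosen values are illustrative only, not values taken from the package's own examples or tests.

    import numpy as np
    from derivkit.derivatives.adaptive.adaptive_fit import AdaptiveFitDerivative

    adaptive = AdaptiveFitDerivative(np.cos, x0=1.0)

    # Default Chebyshev grid; derivative only.
    d1 = adaptive.differentiate(order=1)

    # Same call, also returning the RMS-residual error proxy.
    d1, err = adaptive.differentiate(order=1, return_error=True)

    # Explicit offsets around x0 (0 is inserted automatically if missing),
    # plus the diagnostics dictionary.
    offsets = np.linspace(-0.05, 0.05, 9)
    d1, diag = adaptive.differentiate(order=1, grid=("offsets", offsets), diagnostics=True)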