derivkit 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. derivkit/__init__.py +22 -0
  2. derivkit/calculus/__init__.py +17 -0
  3. derivkit/calculus/calculus_core.py +152 -0
  4. derivkit/calculus/gradient.py +97 -0
  5. derivkit/calculus/hessian.py +528 -0
  6. derivkit/calculus/hyper_hessian.py +296 -0
  7. derivkit/calculus/jacobian.py +156 -0
  8. derivkit/calculus_kit.py +128 -0
  9. derivkit/derivative_kit.py +315 -0
  10. derivkit/derivatives/__init__.py +6 -0
  11. derivkit/derivatives/adaptive/__init__.py +5 -0
  12. derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
  13. derivkit/derivatives/adaptive/batch_eval.py +179 -0
  14. derivkit/derivatives/adaptive/diagnostics.py +325 -0
  15. derivkit/derivatives/adaptive/grid.py +333 -0
  16. derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
  17. derivkit/derivatives/adaptive/spacing.py +66 -0
  18. derivkit/derivatives/adaptive/transforms.py +245 -0
  19. derivkit/derivatives/autodiff/__init__.py +1 -0
  20. derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
  21. derivkit/derivatives/autodiff/jax_core.py +217 -0
  22. derivkit/derivatives/autodiff/jax_utils.py +146 -0
  23. derivkit/derivatives/finite/__init__.py +5 -0
  24. derivkit/derivatives/finite/batch_eval.py +91 -0
  25. derivkit/derivatives/finite/core.py +84 -0
  26. derivkit/derivatives/finite/extrapolators.py +511 -0
  27. derivkit/derivatives/finite/finite_difference.py +247 -0
  28. derivkit/derivatives/finite/stencil.py +206 -0
  29. derivkit/derivatives/fornberg.py +245 -0
  30. derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
  31. derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
  32. derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
  33. derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
  34. derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
  35. derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
  36. derivkit/derivatives/tabulated_model/__init__.py +1 -0
  37. derivkit/derivatives/tabulated_model/one_d.py +247 -0
  38. derivkit/forecast_kit.py +783 -0
  39. derivkit/forecasting/__init__.py +1 -0
  40. derivkit/forecasting/dali.py +78 -0
  41. derivkit/forecasting/expansions.py +486 -0
  42. derivkit/forecasting/fisher.py +298 -0
  43. derivkit/forecasting/fisher_gaussian.py +171 -0
  44. derivkit/forecasting/fisher_xy.py +357 -0
  45. derivkit/forecasting/forecast_core.py +313 -0
  46. derivkit/forecasting/getdist_dali_samples.py +429 -0
  47. derivkit/forecasting/getdist_fisher_samples.py +235 -0
  48. derivkit/forecasting/laplace.py +259 -0
  49. derivkit/forecasting/priors_core.py +860 -0
  50. derivkit/forecasting/sampling_utils.py +388 -0
  51. derivkit/likelihood_kit.py +114 -0
  52. derivkit/likelihoods/__init__.py +1 -0
  53. derivkit/likelihoods/gaussian.py +136 -0
  54. derivkit/likelihoods/poisson.py +176 -0
  55. derivkit/utils/__init__.py +13 -0
  56. derivkit/utils/concurrency.py +213 -0
  57. derivkit/utils/extrapolation.py +254 -0
  58. derivkit/utils/linalg.py +513 -0
  59. derivkit/utils/logger.py +26 -0
  60. derivkit/utils/numerics.py +262 -0
  61. derivkit/utils/sandbox.py +74 -0
  62. derivkit/utils/types.py +15 -0
  63. derivkit/utils/validate.py +811 -0
  64. derivkit-1.0.0.dist-info/METADATA +50 -0
  65. derivkit-1.0.0.dist-info/RECORD +68 -0
  66. derivkit-1.0.0.dist-info/WHEEL +5 -0
  67. derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
  68. derivkit-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,513 @@
1
+ """Utilities for polynomial fitting and evaluation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from math import factorial
6
+
7
+ import numpy as np
8
+ import numpy.linalg as npl
9
+
10
+ from .transforms import (
11
+ signed_log_derivatives_to_x,
12
+ sqrt_derivatives_to_x_at_zero,
13
+ )
14
+
15
+ __all__ = [
16
+ "choose_degree",
17
+ "scale_offsets",
18
+ "fit_multi_power",
19
+ "extract_derivative",
20
+ "assess_polyfit_quality",
21
+ "fit_with_headroom_and_maybe_minimize",
22
+ "pullback_derivative_from_fit",
23
+ ]
24
+
25
+
26
+ def _vandermonde(t: np.ndarray, deg: int) -> np.ndarray:
27
+ """Return the Vandermonde matrix for 1D inputs in the power basis.
28
+
29
+ Args:
30
+ t: 1D array of shape (n_points,).
31
+ deg: Polynomial degree.
32
+
33
+ Returns:
34
+ np.ndarray: Matrix of shape (n_points, deg+1) with columns [1, t, t**2, ..., t**deg].
35
+
36
+ Raises:
37
+ ValueError: If `t` is not 1D or `deg` < 0.
38
+ """
39
+ t = np.asarray(t, dtype=float)
40
+ if t.ndim != 1:
41
+ raise ValueError("t must be 1D.")
42
+ if deg < 0:
43
+ raise ValueError("deg must be >= 0.")
44
+
45
+ return np.vander(t, N=deg + 1, increasing=True)
46
+
47
+
48
def choose_degree(order: int, n_pts: int, extra: int = 5) -> int:
    """Pick a fitting degree from the derivative order and the sample count.

    The degree is ``order + extra``, clamped to ``n_pts - 1`` so the fit never
    becomes underdetermined, while still leaving headroom above the target
    derivative order.

    Args:
        order: Derivative order (>= 0).
        n_pts: Number of available points (>= 1).
        extra: Extra degrees beyond ``order`` (>= 0). Default is 5.

    Returns:
        int: Chosen polynomial degree.

    Raises:
        ValueError: If ``order < 0``, ``n_pts < 1``, or ``extra < 0``.
    """
    if order < 0:
        raise ValueError("order must be >= 0")
    if n_pts < 1:
        raise ValueError("n_pts must be >= 1")
    if extra < 0:
        raise ValueError("extra must be >= 0")

    requested = order + extra
    cap = n_pts - 1
    return requested if requested < cap else cap
73
+
74
+
75
def scale_offsets(t: np.ndarray) -> tuple[np.ndarray, float]:
    """Rescale offsets to improve numerical stability.

    Converts offsets `t` to `u = t/s`, where `s = max(|t|)` (or `1` if `t` is
    empty, all zeros, or non-finite). This mitigates instability in polynomial
    fitting and differentiation, where powers of `t` can become very large or
    very small.

    Args:
        t: 1D array of offsets (can be empty).

    Returns:
        u: Scaled offsets, same shape as `t`.
        s: Positive scaling factor.

    Raises:
        ValueError: If `t` is not 1D.
    """
    t = np.asarray(t, dtype=float)
    # Enforce the documented contract: the docstring always promised a
    # ValueError for non-1D input, but the check was missing.
    if t.ndim != 1:
        raise ValueError("t must be 1D.")
    s = float(np.max(np.abs(t))) if t.size else 1.0
    if not np.isfinite(s) or s <= 0.0:
        s = 1.0  # degenerate grid (all zeros / non-finite): leave t unscaled
    return t / s, s
97
+
98
+
99
def fit_multi_power(
    u: np.ndarray, y: np.ndarray, deg: int, ridge: float = 0.0
) -> tuple[np.ndarray, np.ndarray]:
    """Perform a least-squares polynomial fit in the power basis for multiple components.

    Solves the (optionally ridge-regularized) least-squares problem via an SVD
    of the Vandermonde design matrix, fitting all components of `y` at once.

    Args:
        u: 1D array of scaled independent variable values (n_pts,).
        y: 2D array of dependent variable values (n_pts, n_comp).
        deg: Degree of polynomial to fit (integer, >= 0).
        ridge: Optional ridge regularization parameter (default 0.0).

    Returns:
        C: Array of shape (deg+1, n_comp) with power-basis coefficients.
        rrms: Array of shape (n_comp,) with relative RMS errors.

    Raises:
        ValueError: If inputs have wrong shapes/lengths or degree is invalid.
        TypeError: If `deg` is not an integer.
    """
    u = np.asarray(u, dtype=float)
    y = np.asarray(y, dtype=float)

    if u.ndim != 1:
        raise ValueError("u must be 1D.")
    if y.ndim != 2:
        raise ValueError("y must be 2D (n_pts, n_comp).")
    if y.shape[0] != u.size:
        raise ValueError("len(u) must match y.shape[0].")
    if deg < 0 or deg >= u.size:
        raise ValueError("deg must be in [0, n_pts-1].")

    vander = np.vander(u, N=deg + 1, increasing=True)
    # Fix: the left singular factor previously rebound the input parameter `u`
    # (shadowing); use distinct names for the SVD factors.
    left, sing, vt = np.linalg.svd(vander, full_matrices=False)
    if ridge and ridge > 0.0:
        # Tikhonov-filtered inverse singular values.
        sing_filtered = sing / (sing * sing + ridge)
    else:
        # Plain pseudo-inverse: drop exactly-zero singular values.
        sing_filtered = np.where(sing > 0, 1.0 / sing, 0.0)
    coeffs = (vt.T * sing_filtered) @ (left.T @ y)

    # Relative RMS residual per component, scaled by the centered RMS of y
    # (epsilon guards constant columns).
    res = y - vander @ coeffs
    rms = np.sqrt(np.mean(res * res, axis=0))
    yc = y - np.mean(y, axis=0, keepdims=True)
    scale = np.sqrt(np.mean(yc * yc, axis=0)) + 1e-15
    rrms = rms / scale
    return coeffs, rrms
147
+
148
+
149
def extract_derivative(
    coeffs: np.ndarray, order: int, scale: float
) -> np.ndarray:
    """Extract the derivative of given order from power-basis coefficients.

    The fit was performed in scaled coordinates ``u = t / scale``, so the
    derivative in physical coordinates is ``order! * coeffs[order] / scale**order``.

    Args:
        coeffs: array of shape (deg+1, n_comp) with power-basis coefficients
        order: derivative order (>= 0)
        scale: scaling factor used in offsets (s > 0)

    Returns:
        deriv: array of shape (n_comp,) with the estimated derivative values

    Raises:
        ValueError: if order < 0, scale <= 0, or coeffs has an invalid shape
            (not 2D, or too few rows for the requested order)
    """
    coeffs = np.asarray(coeffs, dtype=float)
    if order < 0:
        raise ValueError("order must be >= 0")
    if scale <= 0.0 or not np.isfinite(scale):
        raise ValueError("scale must be > 0 and finite.")
    # Fix: the docstring always promised ValueError for an invalid coeffs
    # shape, but an out-of-range `order` previously raised IndexError.
    if coeffs.ndim != 2:
        raise ValueError("coeffs must be 2D (deg+1, n_comp).")
    if order >= coeffs.shape[0]:
        raise ValueError("order exceeds the fitted polynomial degree.")

    a_m = coeffs[order, :]
    return (factorial(order) * a_m) / (scale**order)
172
+
173
+
174
+ def assess_polyfit_quality(
175
+ u: np.ndarray,
176
+ y: np.ndarray,
177
+ coeffs: np.ndarray,
178
+ deg: int,
179
+ ridge: float = 0.0,
180
+ factor: float = 1.0,
181
+ order: int = 0,
182
+ ) -> tuple[dict[str, float | dict[str, float]], list[str]]:
183
+ """Assess numerical quality of a power-basis polynomial fit.
184
+
185
+ Computes several diagnostics for a polynomial fit evaluated on scaled offsets
186
+ ``u``:
187
+
188
+ - Relative RMS residual (``rrms_rel``)
189
+ - Leave-one-out (LOO) relative RMSE via the ridge hat matrix (``loo_rel``)
190
+ - Condition number of the Vandermonde design matrix (``cond_vdm``)
191
+ - Relative change of the target derivative compared to a degree-1 refit
192
+ (``deriv_rel``), when feasible
193
+
194
+ It also returns human-readable suggestions to improve a poor fit. The thresholds
195
+ are heuristic and may be tuned for your application. The LOO estimate is derived
196
+ from the ridge hat-matrix diagonal, which is a fast approximation and works well
197
+ for spotting overfit/outliers.
198
+
199
+ Args:
200
+ u (ndarray): Scaled offsets, shape ``(n,)``.
201
+ y (ndarray): Function values at the sample points, shape ``(n, m)``.
202
+ coeffs (ndarray): Power-basis coefficients of the fit with columns per
203
+ component, shape ``(deg+1, m)``. Column ``k`` corresponds to the
204
+ coefficient of ``u**k``.
205
+ deg (int): Polynomial degree used in the fit (``>= 0``).
206
+ ridge (float): Ridge regularization used in the fit (``>= 0``).
207
+ factor (float): Scaling factor such that original offsets ``t = u * factor``.
208
+ Used only when extracting derivatives in the refit comparison.
209
+ order (int): Derivative order of interest (``>= 0``).
210
+
211
+ Returns:
212
+ tuple[dict, list[str]]: A tuple ``(metrics, suggestions)`` where:
213
+
214
+ - ``metrics`` is a dict with keys:
215
+ - ``"rrms_rel"`` (float)
216
+ - ``"loo_rel"`` (float)
217
+ - ``"cond_vdm"`` (float)
218
+ - ``"deriv_rel"`` (float)
219
+ - ``"thresholds"`` (dict): contains the threshold values used for
220
+ each metric (same keys as above)
221
+ - ``suggestions`` is a list of textual recommendations to improve the fit.
222
+
223
+ Notes:
224
+ Large values of any metric indicate potential instability. Consider widening
225
+ the sampling window (``spacing``), modestly increasing the number of points,
226
+ or adding light ridge regularization.
227
+ """
228
+ # build design matrix [1, u, u^2, ...]
229
+ design = np.vstack([u ** k for k in range(deg + 1)]).T # (n, deg+1)
230
+
231
+ # predictions and residuals
232
+ y_hat = design @ coeffs # (n, m)
233
+ resid = y - y_hat
234
+ rrms = np.sqrt(np.mean(resid**2, axis=0)) # (m,)
235
+ y_scale = np.median(np.abs(y), axis=0) + 1e-12
236
+ rrms_rel = float(np.max(rrms / y_scale))
237
+
238
+ # ridge hat-matrix diagonal for LOO residuals
239
+ gram = design.T @ design + ridge * np.eye(deg + 1)
240
+ gram_inv = npl.pinv(gram) if ridge == 0.0 else npl.inv(gram)
241
+ design_gram_inv = design @ gram_inv
242
+ h_diag = np.sum(design_gram_inv * design, axis=1) # (n,)
243
+ loo_resid = resid / (1.0 - h_diag)[:, None]
244
+ loo_rmse = np.sqrt(np.mean(loo_resid**2, axis=0)) # (m,)
245
+ loo_rel = float(np.max(loo_rmse / y_scale))
246
+
247
+ # condition number of the design (svd-based, no ridge)
248
+ sing_vals = npl.svd(design, compute_uv=False)
249
+ cond_vdm = float((sing_vals[0] / sing_vals[-1]) if sing_vals[-1] > 0 else np.inf)
250
+
251
+ # derivative stability vs lower-degree refit
252
+ deriv_rel = 0.0
253
+ step_down = 2 # compare deg vs deg-2 for a stronger signal
254
+ if deg >= max(order, 1) + step_down:
255
+ deg_low = deg - step_down
256
+ design_low = design[:, :deg_low + 1]
257
+ gram_low = design_low.T @ design_low + ridge * np.eye(deg_low + 1)
258
+ gram_low_inv = npl.pinv(gram_low) if ridge == 0.0 else npl.inv(gram_low)
259
+ coeffs_low = gram_low_inv @ design_low.T @ y
260
+
261
+ deriv_full = extract_derivative(coeffs, order, factor)
262
+ deriv_low = extract_derivative(coeffs_low, order, factor)
263
+
264
+ num = np.max(np.abs(deriv_full - deriv_low))
265
+ den = np.max(np.abs(deriv_full)) + 1e-12
266
+ deriv_rel = float(num / den)
267
+
268
+ # heuristic thresholds
269
+ th = {"rrms_rel": 5e-4, "loo_rel": 1e-3, "cond_vdm": 1e8, "deriv_rel": 5e-3}
270
+
271
+ # user-facing, concrete suggestions
272
+ suggestions: list[str] = []
273
+
274
+ # 1) Residuals too large: polynomial not matching data well.
275
+ if rrms_rel > th["rrms_rel"] or loo_rel > th["loo_rel"]:
276
+ suggestions.append(
277
+ "- Use a wider sampling window: increase `spacing` "
278
+ "(e.g. from '2%' to '4%' or multiply your numeric spacing by 2)."
279
+ )
280
+ suggestions.append(
281
+ "- Use more sample points: increase `n_points` by ~4–8 "
282
+ "(up to the documented Chebyshev cap, e.g. 20–30)."
283
+ )
284
+ suggestions.append(
285
+ "- If the function is noisy, add a small `ridge` (e.g. 1e-8–1e-4)."
286
+ )
287
+
288
+ # 2) Vandermonde badly conditioned: grid/degree combo is numerically fragile.
289
+ if cond_vdm > th["cond_vdm"]:
290
+ suggestions.append(
291
+ "- Add light regularization: try `ridge=1e-8` or `ridge=1e-6`."
292
+ )
293
+ suggestions.append(
294
+ "- Slightly widen `spacing` so nodes are less clustered."
295
+ )
296
+ suggestions.append(
297
+ "- If you passed a custom grid, try fewer extreme points or a "
298
+ "more symmetric set around `x0`."
299
+ )
300
+
301
+ # 3) Derivative changes a lot when degree changes: overfitting / not enough info.
302
+ if deriv_rel > th["deriv_rel"]:
303
+ suggestions.append(
304
+ "- Increase `n_points` to give the fit more information "
305
+ "(for example from 10 → 16 or 20)."
306
+ )
307
+ suggestions.append(
308
+ "- Slightly widen `spacing` so the polynomial sees a smoother neighborhood."
309
+ )
310
+ suggestions.append(
311
+ "- If this still triggers, consider using `method='polyfit'` for this "
312
+ "derivative as a safer fallback."
313
+ )
314
+
315
+ if not suggestions:
316
+ suggestions.append(
317
+ "- No obvious numerical issues detected; the adaptive fit looks stable "
318
+ "for this configuration. Congratulations!"
319
+ )
320
+
321
+ metrics: dict[str, float | dict[str, float]] = {
322
+ "rrms_rel": rrms_rel,
323
+ "loo_rel": loo_rel,
324
+ "cond_vdm": cond_vdm,
325
+ "deriv_rel": deriv_rel,
326
+ "thresholds": th,
327
+ }
328
+ return metrics, suggestions
329
+
330
+
331
def fit_with_headroom_and_maybe_minimize(
    u: np.ndarray, y: np.ndarray, *, order: int, mode: str, ridge: float, factor: float, deg_cap: int = 8
) -> tuple[np.ndarray, np.ndarray, int]:
    """Perform a polynomial fit with degree headroom, collapsing to the minimal degree for high orders.

    Fits a polynomial whose degree includes headroom above the minimum required
    for the target derivative, using the highest numerically stable degree up
    to a cap. For derivative orders >= 3 only, the function also refits at the
    minimal required degree and adopts that smaller fit when its (pullback)
    derivatives agree with the higher-degree fit; for orders 1-2 the
    higher-degree fit is always kept for accuracy.

    Args:
        u: Scaled independent variable values (offsets), shape ``(n_points,)``.
        y: Function values evaluated at the grid points, shape ``(n_points, n_components)``.
        order: Derivative order to compute (``>= 1``).
        mode: Sampling mode — one of ``"x"``, ``"signed_log"``, or ``"sqrt"``.
            Determines the pullback applied when comparing fits.
        ridge: Ridge regularization parameter applied in the least-squares fit.
        factor: Scaling factor relating physical offsets to scaled ones (``t = u * factor``).
        deg_cap: Maximum allowed polynomial degree (default 8).

    Returns:
        Tuple[np.ndarray, np.ndarray, int]:
            A 3-tuple ``(coeffs, rrms, deg_used)`` where:
            - ``coeffs``: Power-basis polynomial coefficients, shape ``(deg+1, n_components)``.
            - ``rrms``: Relative RMS residuals per component, shape ``(n_components,)``.
            - ``deg_used``: Polynomial degree actually adopted (the stable
              higher degree, or ``deg_req`` after a collapse).

    Raises:
        ValueError: If fewer than 2 samples are given or ``y`` has the wrong shape.

    Notes:
        - ``deg_req`` equals ``2 * order`` for ``"sqrt"`` mode, else ``order``.
        - Headroom is set to +4 for second-order sqrt mode, otherwise +2; the
          degree is further capped by ``deg_cap`` and the overdetermination
          bound ``(n_points - 1) // 2``.
        - The switch to the minimal degree is attempted only when ``order >= 3``
          and a degree above ``deg_req`` was actually used; it is adopted when
          the two fits' pullback derivatives agree within a 5e-3 relative
          tolerance. Any failure during the comparison silently keeps the
          higher-degree fit.
    """
    u = np.asarray(u, dtype=float)
    y = np.asarray(y, dtype=float)

    n_eff = u.size
    if n_eff < 2:
        raise ValueError("Need at least 2 samples for polynomial fit.")
    if y.ndim != 2 or y.shape[0] != n_eff:
        raise ValueError("y must have shape (n_points, n_components).")

    # Required degree: sqrt-mode needs more for pullback.
    deg_req = (2 * order) if (mode == "sqrt") else order

    # Headroom, matching the original intent.
    extra_need = 4 if (mode == "sqrt" and order == 2) else 2
    deg_max_raw = deg_req + extra_need

    # Hard cap + overdetermined constraint: keep design safe.
    deg_max = min(deg_max_raw, (n_eff - 1) // 2, deg_cap)

    if deg_max < deg_req:
        deg_max = deg_req  # should be safe due to min-sample logic upstream

    # Choose a numerically stable degree in [deg_req, deg_max].
    deg_used = _choose_stable_degree(u, deg_req, deg_max, cond_max=1e8)

    # Fit at the chosen degree.
    coeffs, rrms = fit_multi_power(u, y, deg_used, ridge=ridge)

    # For order 1–2 we KEEP the higher-degree fit for accuracy.
    # Only for higher-order derivatives do we consider collapsing.
    if deg_used > deg_req and order >= 3:
        try:
            coeffs_min, rrms_min = fit_multi_power(u, y, deg_req, ridge=ridge)

            # Compare physical-domain derivatives of both fits; the sqrt
            # branch is pinned to the +1 branch sign at the expansion point.
            d_hi = pullback_derivative_from_fit(
                mode=mode,
                order=order,
                coeffs=coeffs,
                factor=factor,
                x0=0.0,
                sign_used=(+1.0 if mode == "sqrt" else None),
            )
            d_min = pullback_derivative_from_fit(
                mode=mode,
                order=order,
                coeffs=coeffs_min,
                factor=factor,
                x0=0.0,
                sign_used=(+1.0 if mode == "sqrt" else None),
            )

            # Relative agreement across all components (epsilon guards zero).
            num = np.max(np.abs(d_hi - d_min))
            den = np.max(np.abs(d_hi)) + 1e-12
            if num / den < 5e-3:
                return coeffs_min, rrms_min, deg_req
        except Exception:
            # Best-effort: if anything goes odd, keep the original stable fit.
            pass

    return coeffs, rrms, deg_used
428
+
429
+
430
+ def pullback_derivative_from_fit(
431
+ *, mode: str, order: int, coeffs: np.ndarray, factor: float, x0: float, sign_used: float | None
432
+ ) -> np.ndarray:
433
+ """Extract the derivative at ``x0`` with mode-specific pullbacks.
434
+
435
+ Interprets the power-basis polynomial coefficients in the internal coordinate
436
+ and converts the requested derivative to the physical ``x`` domain. In
437
+ ``"x"`` mode, the derivative is read directly from the power basis. In
438
+ transformed modes, an analytic pullback is applied: the signed-log chain
439
+ rule or the boundary-centered square-root mapping. Note that in ``"sqrt"`` mode,
440
+ the first derivative in ``x`` uses the internal 2nd coefficient,
441
+ and the second derivative uses the internal 4th coefficient.
442
+
443
+ Args:
444
+ mode: Sampling/transform mode (``"x"``, ``"signed_log"``, or ``"sqrt"``).
445
+ order: Derivative order to return (``>= 1``). For transformed modes, only
446
+ orders 1 and 2 are supported.
447
+ coeffs: Power-basis coefficients with columns per component, shape
448
+ ``(deg+1, n_components)``.
449
+ factor: Positive scaling factor such that physical offsets satisfy
450
+ ``t = u * factor``.
451
+ x0: Physical expansion point where the derivative is evaluated.
452
+ sign_used: For ``"sqrt"`` mode, the branch sign (``+1`` or ``-1``). Ignored
453
+ for other modes.
454
+
455
+ Returns:
456
+ np.ndarray: The requested derivative at ``x0`` with shape ``(n_components,)``.
457
+
458
+ Raises:
459
+ NotImplementedError: If ``mode`` is ``"signed_log"`` or ``"sqrt"`` and
460
+ ``order`` is not 1 or 2.
461
+ """
462
+ if mode == "signed_log":
463
+ d1 = extract_derivative(coeffs, 1, factor)
464
+ if order == 1:
465
+ return signed_log_derivatives_to_x(1, x0, d1)
466
+ d2 = extract_derivative(coeffs, 2, factor)
467
+ return signed_log_derivatives_to_x(2, x0, d1, d2)
468
+
469
+ if mode == "sqrt":
470
+ s = +1.0 if (sign_used is None) else float(sign_used)
471
+ if order == 1:
472
+ g2 = extract_derivative(coeffs, 2, factor)
473
+ return sqrt_derivatives_to_x_at_zero(1, s, g2=g2)
474
+ g4 = extract_derivative(coeffs, 4, factor)
475
+ return sqrt_derivatives_to_x_at_zero(2, s, g4=g4)
476
+
477
+ return extract_derivative(coeffs, order, factor)
478
+
479
+
480
def _choose_stable_degree(u: np.ndarray,
                          deg_min: int,
                          deg_max: int,
                          cond_max: float = 1e8) -> int:
    """Return the highest Vandermonde degree with an acceptable condition number.

    Scans degrees from deg_max down to deg_min and returns the first (i.e.
    highest) degree whose Vandermonde matrix has a condition number at or below
    cond_max. If no degree qualifies, deg_min is returned (clamped to 0 when
    deg_min < 0 and the range is empty).

    Args:
        u: 1D array of scaled independent variable values (offsets).
        deg_min: Minimum degree to consider (integer, >= 0).
        deg_max: Maximum degree to consider (integer, >= deg_min).
        cond_max: Maximum acceptable condition number (default 1e8).

    Returns:
        int: Chosen polynomial degree.
    """
    offsets = np.asarray(u, dtype=float)
    if deg_max < deg_min:
        return max(deg_min, 0)

    for candidate in range(deg_max, deg_min - 1, -1):
        singular = np.linalg.svd(_vandermonde(offsets, candidate), compute_uv=False)
        smallest = singular[-1]
        if smallest <= 0.0:
            # Rank-deficient design at this degree: try a lower one.
            continue
        condition = singular[0] / smallest
        if np.isfinite(condition) and condition <= cond_max:
            # First hit from the top is the highest stable degree.
            return candidate
    return deg_min
@@ -0,0 +1,66 @@
1
+ """Convert a spacing spec ('auto', '<p>%', or number) into a positive step size."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import numpy as np
6
+
7
+ __all__ = ["resolve_spacing"]
8
+
9
+
10
+ def resolve_spacing(spacing: str | float,
11
+ x0: float,
12
+ base_abs: float | None) -> float:
13
+ """Return a positive step size h around x0; 'auto' and '<p>%' scale with the absolute value of x0, numeric inputs are absolute, and base_abs sets the floor.
14
+
15
+ Converts a user-facing spacing option into a numeric spacing h suitable for
16
+ finite-difference or sampling routines. The 'auto' mode corresponds to 2% of
17
+ the magnitude of x0 (i.e., 0.02 * abs(x0)) but never below the floor.
18
+
19
+ If the scaled value is below the floor, the result is the floor (e.g., with the
20
+ default 1e-3 and x0≈1e-6, 'auto' returns 1e-3); pass base_abs to choose a
21
+ smaller floor. Numeric inputs are absolute and do not use the floor;
22
+ the floor applies only to "auto" and "<p>%".
23
+
24
+ Args:
25
+ spacing: "auto", a percentage (e.g. a string representing a percentage "2%"), or a positive number.
26
+ x0: Point at which the derivative is evaluated; scale reference for "auto"
27
+ and percentages.
28
+ base_abs: Absolute lower bound for h (defaults to 1e-3 if None).
29
+
30
+ Returns:
31
+ float: A positive, finite spacing value.
32
+
33
+ Raises:
34
+ ValueError: If spacing is invalid (e.g. non-positive/NaN number, malformed
35
+ percent, or unsupported type).
36
+ """
37
+ floor = 1e-3 if base_abs is None else float(base_abs)
38
+
39
+ # numeric absolute spacing (floor does not apply to explicit numbers)
40
+ if isinstance(spacing, (int, float)):
41
+ h = float(spacing)
42
+ if not np.isfinite(h) or h <= 0:
43
+ raise ValueError("numeric spacing must be positive and finite.")
44
+ return h
45
+
46
+ # auto: scale with absolute value of x0 but never below floor
47
+ if spacing == "auto":
48
+ h = 0.02 * abs(float(x0))
49
+ return float(max(h, floor))
50
+
51
+ # percent like '2%'
52
+ if isinstance(spacing, str) and spacing.strip().endswith("%"):
53
+ s = spacing.strip()
54
+ try:
55
+ frac = float(s[:-1]) / 100.0
56
+ except ValueError:
57
+ raise ValueError(f"invalid percent spacing: {spacing!r}")
58
+ if not np.isfinite(frac) or frac <= 0:
59
+ raise ValueError("percent spacing must be > 0.")
60
+ h = frac * abs(float(x0))
61
+ # If x0 == 0 or too small, fall back to floor
62
+ return float(max(h, floor))
63
+
64
+ raise ValueError(
65
+ "spacing must be 'auto', a percent like '2%', or a positive number."
66
+ )