derivkit-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. derivkit/__init__.py +22 -0
  2. derivkit/calculus/__init__.py +17 -0
  3. derivkit/calculus/calculus_core.py +152 -0
  4. derivkit/calculus/gradient.py +97 -0
  5. derivkit/calculus/hessian.py +528 -0
  6. derivkit/calculus/hyper_hessian.py +296 -0
  7. derivkit/calculus/jacobian.py +156 -0
  8. derivkit/calculus_kit.py +128 -0
  9. derivkit/derivative_kit.py +315 -0
  10. derivkit/derivatives/__init__.py +6 -0
  11. derivkit/derivatives/adaptive/__init__.py +5 -0
  12. derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
  13. derivkit/derivatives/adaptive/batch_eval.py +179 -0
  14. derivkit/derivatives/adaptive/diagnostics.py +325 -0
  15. derivkit/derivatives/adaptive/grid.py +333 -0
  16. derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
  17. derivkit/derivatives/adaptive/spacing.py +66 -0
  18. derivkit/derivatives/adaptive/transforms.py +245 -0
  19. derivkit/derivatives/autodiff/__init__.py +1 -0
  20. derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
  21. derivkit/derivatives/autodiff/jax_core.py +217 -0
  22. derivkit/derivatives/autodiff/jax_utils.py +146 -0
  23. derivkit/derivatives/finite/__init__.py +5 -0
  24. derivkit/derivatives/finite/batch_eval.py +91 -0
  25. derivkit/derivatives/finite/core.py +84 -0
  26. derivkit/derivatives/finite/extrapolators.py +511 -0
  27. derivkit/derivatives/finite/finite_difference.py +247 -0
  28. derivkit/derivatives/finite/stencil.py +206 -0
  29. derivkit/derivatives/fornberg.py +245 -0
  30. derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
  31. derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
  32. derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
  33. derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
  34. derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
  35. derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
  36. derivkit/derivatives/tabulated_model/__init__.py +1 -0
  37. derivkit/derivatives/tabulated_model/one_d.py +247 -0
  38. derivkit/forecast_kit.py +783 -0
  39. derivkit/forecasting/__init__.py +1 -0
  40. derivkit/forecasting/dali.py +78 -0
  41. derivkit/forecasting/expansions.py +486 -0
  42. derivkit/forecasting/fisher.py +298 -0
  43. derivkit/forecasting/fisher_gaussian.py +171 -0
  44. derivkit/forecasting/fisher_xy.py +357 -0
  45. derivkit/forecasting/forecast_core.py +313 -0
  46. derivkit/forecasting/getdist_dali_samples.py +429 -0
  47. derivkit/forecasting/getdist_fisher_samples.py +235 -0
  48. derivkit/forecasting/laplace.py +259 -0
  49. derivkit/forecasting/priors_core.py +860 -0
  50. derivkit/forecasting/sampling_utils.py +388 -0
  51. derivkit/likelihood_kit.py +114 -0
  52. derivkit/likelihoods/__init__.py +1 -0
  53. derivkit/likelihoods/gaussian.py +136 -0
  54. derivkit/likelihoods/poisson.py +176 -0
  55. derivkit/utils/__init__.py +13 -0
  56. derivkit/utils/concurrency.py +213 -0
  57. derivkit/utils/extrapolation.py +254 -0
  58. derivkit/utils/linalg.py +513 -0
  59. derivkit/utils/logger.py +26 -0
  60. derivkit/utils/numerics.py +262 -0
  61. derivkit/utils/sandbox.py +74 -0
  62. derivkit/utils/types.py +15 -0
  63. derivkit/utils/validate.py +811 -0
  64. derivkit-1.0.0.dist-info/METADATA +50 -0
  65. derivkit-1.0.0.dist-info/RECORD +68 -0
  66. derivkit-1.0.0.dist-info/WHEEL +5 -0
  67. derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
  68. derivkit-1.0.0.dist-info/top_level.txt +1 -0

derivkit/derivatives/finite/finite_difference.py
@@ -0,0 +1,247 @@
+ """Provides the FiniteDifferenceDerivative class.
+
+ The user must specify the function to differentiate and the central value
+ at which the derivative should be evaluated. More details about available
+ options can be found in the documentation of the methods.
+ """
+
+ from collections.abc import Callable
+ from functools import partial
+
+ import numpy as np
+
+ from derivkit.derivatives.finite.core import single_finite_step
+ from derivkit.derivatives.finite.extrapolators import (
+     adaptive_gre_fd,
+     adaptive_richardson_fd,
+     adaptive_ridders_fd,
+     fixed_gre_fd,
+     fixed_richardson_fd,
+     fixed_ridders_fd,
+ )
+ from derivkit.derivatives.finite.stencil import (
+     TRUNCATION_ORDER,
+     validate_supported_combo,
+ )
+
+
+ class FiniteDifferenceDerivative:
+     """Computes numerical derivatives using central finite difference stencils.
+
+     This class supports the calculation of first to fourth-order derivatives
+     for scalar or vector-valued functions. It uses high-accuracy central
+     difference formulas with configurable stencil sizes (3-, 5-, 7-, or 9-point).
+
+     For scalar-valued functions, a single float is returned. For vector-valued
+     functions, the derivative is computed component-wise and returned as a
+     NumPy array.
+
+     Attributes:
+         function: The function to differentiate. Must accept a single
+             float and return either a float or a 1D array-like object.
+         x0: The point at which the derivative is evaluated.
+
+     Supported Stencil and Derivative Combinations
+     ---------------------------------------------
+     - 3-point: first-order only
+     - 5-point: first to fourth-order
+     - 7-point: first and second-order
+     - 9-point: first and second-order
+
+     Examples:
+     ---------
+     >>> import numpy as np
+     >>> from derivkit.derivatives.finite.finite_difference import FiniteDifferenceDerivative
+
+     Basic second derivative without extrapolation:
+
+     >>> f = lambda x: x**3
+     >>> d = FiniteDifferenceDerivative(function=f, x0=2.0)
+     >>> round(float(d.differentiate(order=2)), 6)
+     12.0
+
+     First derivative with Ridders extrapolation and an error estimate:
+
+     >>> g = np.sin
+     >>> d = FiniteDifferenceDerivative(function=g, x0=0.7)
+     >>> val, err = d.differentiate(
+     ...     order=1,
+     ...     stepsize=1e-2,
+     ...     num_points=5,
+     ...     extrapolation="ridders",
+     ...     levels=4,
+     ...     return_error=True,
+     ... )
+     >>> bool(np.allclose(val, np.cos(0.7), rtol=1e-6))
+     True
+
+     Vector-valued function with Gauss–Richardson extrapolation:
+
+     >>> def vec_func(x):
+     ...     return np.array([np.sin(x), np.cos(x)])
+     >>> d = FiniteDifferenceDerivative(function=vec_func, x0=0.3)
+     >>> val = d.differentiate(
+     ...     order=1,
+     ...     stepsize=1e-2,
+     ...     num_points=5,
+     ...     extrapolation="gauss-richardson",
+     ...     levels=4,
+     ... )
+     >>> val.shape
+     (2,)
+     """
+
+     def __init__(
+         self,
+         function: Callable,
+         x0: float,
+     ) -> None:
+         """Initialises the class based on function and central value.
+
+         Arguments:
+             function: The function to differentiate. Must accept a single
+                 float and return either a float or a 1D array-like object.
+             x0: The point at which the derivative is evaluated.
+         """
+         self.function = function
+         self.x0 = x0
+
+     def differentiate(
+         self,
+         order: int = 1,
+         stepsize: float = 0.01,
+         num_points: int = 5,
+         n_workers: int = 1,
+         extrapolation: str | None = None,
+         levels: int | None = None,
+         return_error: bool = False,
+     ) -> np.ndarray | float:
+         """Computes the derivative using a central finite difference scheme.
+
+         Supports 3-, 5-, 7-, or 9-point central difference stencils for
+         derivative orders 1 through 4 (depending on the stencil size).
+         Derivatives are computed for scalar or vector-valued functions.
+         Optional extrapolation (Richardson, Ridders, or Gauss-Richardson)
+         can be applied to improve accuracy, and an error estimate is
+         returned if requested.
+
+         Args:
+             order: The order of the derivative to compute. Must be supported by
+                 the chosen stencil size. Default is ``1``.
+             stepsize: Step size (h) used to evaluate the function around the
+                 central value. Default is ``0.01``.
+             num_points: Number of points in the finite difference stencil.
+                 Must be one of ``[3, 5, 7, 9]``. Default is ``5``.
+             n_workers: Number of workers to use in multiprocessing.
+                 Default is ``1`` (no multiprocessing).
+             extrapolation: Extrapolation scheme to use for improving accuracy.
+                 Supported options are:
+
+                 * ``None``: no extrapolation (single finite difference).
+                 * ``"richardson"``:
+
+                   - fixed-level if ``levels`` is not ``None``
+                   - adaptive if ``levels`` is ``None``
+
+                 * ``"ridders"``:
+
+                   - fixed-level if ``levels`` is not ``None``
+                   - adaptive if ``levels`` is ``None``
+
+                 * ``"gauss-richardson"`` or ``"gre"``:
+
+                   - fixed-level if ``levels`` is not ``None``
+                   - adaptive if ``levels`` is ``None``
+
+             levels: Number of extrapolation levels for fixed schemes.
+                 If ``None``, the chosen extrapolation method runs in
+                 adaptive mode where supported.
+             return_error: If ``True``, also return an error estimate from
+                 the extrapolation (or two-step) routine.
+
+         Returns:
+             The estimated derivative: a float for scalar-valued functions,
+             or a NumPy array for vector-valued functions. If
+             ``return_error=True``, a ``(value, error)`` pair is returned.
+
+         Raises:
+             ValueError:
+                 If the combination of ``num_points`` and ``order`` is not
+                 supported or if an unknown extrapolation scheme is given.
+
+         Notes:
+             The available (num_points, order) combinations are:
+             - 3: order 1
+             - 5: orders 1, 2, 3, 4
+             - 7: orders 1, 2
+             - 9: orders 1, 2
+         """
+         if stepsize <= 0:
+             raise ValueError("stepsize must be positive.")
+
+         validate_supported_combo(num_points, order)
+
+         # Set up a partial function for a single finite-difference step.
+         single = partial(
+             single_finite_step,
+             self.function,
+             self.x0,
+         )
+
+         # Bare finite difference (no extrapolation).
+         if extrapolation is None:
+             value = single(order, stepsize, num_points, n_workers)
+
+             if not return_error:
+                 return value
+
+             # A second evaluation at h/2 gives a crude error estimate.
+             r = 2.0
+             value_refined = single(order, stepsize / r, num_points, n_workers)
+
+             val_arr = np.asarray(value, dtype=float)
+             ref_arr = np.asarray(value_refined, dtype=float)
+             err_arr = np.abs(val_arr - ref_arr)
+
+             if np.isscalar(value) or np.shape(value) == ():
+                 err_out: float | np.ndarray = float(err_arr)
+             else:
+                 err_out = err_arr
+
+             return value, err_out
+
+         # For extrapolation, look up the truncation order of the stencil first.
+         key = (num_points, order)
+         p = TRUNCATION_ORDER.get(key)
+         if p is None:
+             raise ValueError(
+                 f"Extrapolation not configured for stencil {key}."
+             )
+
+         # Choose the extrapolator based on scheme and levels.
+         if extrapolation == "richardson":
+             extrap_fn = (fixed_richardson_fd if levels is not None else adaptive_richardson_fd)
+         elif extrapolation == "ridders":
+             extrap_fn = (fixed_ridders_fd if levels is not None else adaptive_ridders_fd)
+         elif extrapolation in {"gauss-richardson", "gre"}:
+             extrap_fn = fixed_gre_fd if levels is not None else adaptive_gre_fd
+         else:
+             raise ValueError(f"Unknown extrapolation scheme: {extrapolation!r}")
+
+         # Common kwargs for all extrapolators.
+         extrap_kwargs: dict = dict(
+             single_finite=single,
+             order=order,
+             stepsize=stepsize,
+             num_points=num_points,
+             n_workers=n_workers,
+             p=p,
+         )
+         if levels is not None:
+             extrap_kwargs["levels"] = levels
+
+         if return_error:
+             extrap_kwargs["return_error"] = True
+             value, err = extrap_fn(**extrap_kwargs)
+             return value, err
+
+         # If no error is requested, return only the estimated derivative.
+         return extrap_fn(**extrap_kwargs)
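
A minimal sketch of the adaptive extrapolation path described in the docstring above, which the bundled doctests do not exercise (they only show fixed ``levels``). It assumes the wheel is installed and uses only the constructor and ``differentiate`` signature shown in this file; the tolerance is illustrative:

    import numpy as np

    from derivkit.derivatives.finite.finite_difference import FiniteDifferenceDerivative

    # Adaptive Richardson extrapolation: leaving levels=None lets the
    # extrapolator choose its own refinement depth.
    d = FiniteDifferenceDerivative(function=np.exp, x0=1.0)
    value, error = d.differentiate(
        order=1,
        stepsize=1e-2,
        num_points=5,
        extrapolation="richardson",
        levels=None,          # adaptive mode
        return_error=True,    # also return the routine's error estimate
    )

    # The first derivative of exp at 1.0 is e itself.
    assert np.isclose(value, np.e, rtol=1e-6)
    print(value, error)
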
derivkit/derivatives/finite/stencil.py
@@ -0,0 +1,206 @@
+ """Stencil definitions and utilities for finite-difference derivative calculations."""
+
+ import math
+
+ import numpy as np
+ from numpy.typing import NDArray
+
+ FloatArray = NDArray[np.float64]
+
+ __all__ = [
+     "get_finite_difference_tables",
+     "validate_supported_combo",
+     "SUPPORTED_BY_STENCIL",
+     "TRUNCATION_ORDER",
+     "STENCILS",
+     "ORDERS",
+ ]
+
+
+ #: The supported stencil sizes.
+ STENCILS = (3, 5, 7, 9)
+ #: The supported derivative orders.
+ ORDERS = (1, 2, 3, 4)
+
+
+ def supported_orders(
+     num_points: int,
+     *,
+     max_order: int = 4,
+ ) -> set[int]:
+     """Returns the set of supported derivative orders for a given stencil size and maximum derivative order.
+
+     Args:
+         num_points: Number of points in the stencil. The supported stencil sizes are defined in :data:`STENCILS`.
+         max_order: The maximum supported derivative order.
+
+     Returns:
+         The set of supported derivative orders for the given stencil size.
+
+     Raises:
+         ValueError: If ``num_points`` is not in :data:`STENCILS`.
+         ValueError: If ``max_order < 1`` or ``max_order > 4``.
+     """
+     if num_points not in STENCILS:
+         raise ValueError(f"num_points must be one of {STENCILS}")
+     if max_order < 1:
+         raise ValueError("max_order must be at least 1")
+     if max_order > max(ORDERS):
+         raise ValueError(f"max_order must be {max(ORDERS)} or less")
+
+     return set(range(1, min(max_order, num_points - 1) + 1))
+
+
+ # Dictionary mapping each available stencil size to its supported derivative orders.
+ SUPPORTED_BY_STENCIL = {n: supported_orders(n) for n in STENCILS}
+
+
+ def _central_offsets(
+     num_points: int,
+ ) -> FloatArray:
+     """Creates a grid of central offset values for a desired number of points.
+
+     Args:
+         num_points: Number of points desired in the grid.
+
+     Returns:
+         An array of offset values centered at zero.
+     """
+     half = num_points // 2
+     return np.arange(-half, half + 1, dtype=np.float64)
+
+
+ def truncation_order_from_coeffs(
+     offsets: FloatArray,
+     coeffs: FloatArray,
+     deriv_order: int,
+     tol: float = 1e-12,
+ ) -> int:
+     """Computes the truncation order from the stencil offsets and coefficients, for a given numerical tolerance.
+
+     Args:
+         offsets: Array of integer offsets for the finite difference stencil.
+         coeffs: Array of finite difference coefficients.
+         deriv_order: The requested derivative order.
+         tol: Numerical tolerance used to determine the truncation order.
+
+     Returns:
+         The truncation order for the given numerical tolerance.
+     """
+     m = deriv_order
+     max_r = 40
+
+     for r in range(m + 1, max_r + 1):
+         moment = float(np.dot(coeffs, offsets**r))
+         if abs(moment) > tol:
+             return r - m
+     raise RuntimeError("Could not detect truncation order.")
+
+
+ def _finite_difference_coeffs(
+     offsets: list[int] | NDArray,
+     deriv_order: int,
+     stepsize: float,
+ ) -> NDArray:
+     """Computes finite difference coefficients for given offsets and derivative order.
+
+     This function solves a linear system to find the coefficients that
+     approximate the derivative of the specified order using the provided offsets.
+
+     Args:
+         offsets: List or array of integer offsets for the finite difference stencil.
+         deriv_order: The order of the derivative to approximate.
+         stepsize: The stepsize used in the finite difference calculation.
+
+     Returns:
+         An array of finite difference coefficients.
+     """
+     offsets = np.asarray(offsets, dtype=float)
+     n = offsets.size
+
+     matrix = np.zeros((n, n), dtype=float)
+     b = np.zeros(n, dtype=float)
+
+     # Match the Taylor expansion up to degree n-1.
+     for k in range(n):
+         matrix[k, :] = offsets**k / math.factorial(k)
+     b[deriv_order] = 1.0  # enforce the correct derivative of order m
+
+     coeffs = np.linalg.solve(matrix, b) / (stepsize**deriv_order)
+     return coeffs
+
+
+ def _build_truncation_orders() -> dict[tuple[int, int], int]:
+     """Dynamically computes the truncation orders for the supported stencil combinations.
+
+     Returns:
+         A dictionary of truncation orders for the supported stencil sizes and derivative orders.
+     """
+     out: dict[tuple[int, int], int] = {}
+     h = 1.0
+     for n in STENCILS:
+         k = _central_offsets(n)
+         for m in supported_orders(n):
+             c = _finite_difference_coeffs(k, m, h)
+             out[(n, m)] = truncation_order_from_coeffs(k, c, m)
+     return out
+
+
+ #: Dictionary of truncation orders for the supported stencil sizes and derivative orders.
+ TRUNCATION_ORDER = _build_truncation_orders()
+
+
+ def get_finite_difference_tables(
+     stepsize: float,
+ ) -> tuple[dict[int, list[int]], dict[tuple[int, int], np.ndarray]]:
+     """Dynamically computes offset patterns and coefficient tables.
+
+     Args:
+         stepsize: The step size to use for the stencil spacing.
+
+     Returns:
+         A dictionary of offsets, and a dictionary of coefficient tables,
+         for the supported stencil sizes and derivative orders.
+     """
+     offsets = {n: _central_offsets(n).astype(int).tolist() for n in STENCILS}
+     coeffs_table: dict[tuple[int, int], FloatArray] = {}
+
+     for n in STENCILS:
+         k = _central_offsets(n)
+         for m in supported_orders(n):
+             coeffs_table[(n, m)] = _finite_difference_coeffs(k, m, stepsize)
+
+     return offsets, coeffs_table
+
+
+ def validate_supported_combo(
+     num_points: int,
+     order: int,
+ ) -> None:
+     """Validates that the (stencil size, order) combination is supported.
+
+     Args:
+         num_points: Number of points in the finite difference stencil.
+         order: The order of the derivative to compute.
+
+     Raises:
+         ValueError: If the combination of num_points and order is not supported.
+     """
+     if num_points not in STENCILS:
+         raise ValueError(
+             f"[FiniteDifference] Unsupported stencil size: {num_points}. "
+             f"Must be one of {list(STENCILS)}."
+         )
+     if order not in ORDERS:
+         raise ValueError(
+             f"[FiniteDifference] Unsupported derivative order: {order}. "
+             f"Must be one of {list(ORDERS)}."
+         )
+
+     allowed = SUPPORTED_BY_STENCIL[num_points]
+     if order not in allowed:
+         raise ValueError(
+             "[FiniteDifference] Not implemented yet: "
+             f"{num_points}-point stencil for order {order}.\n"
+         )
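
To make the coefficient construction above concrete, the following sketch (not part of the package) checks the classic 5-point first-derivative weights (1, -8, 0, 8, -1)/(12h) and the corresponding truncation order against the public ``get_finite_difference_tables`` and ``TRUNCATION_ORDER`` objects:

    import numpy as np

    from derivkit.derivatives.finite.stencil import (
        TRUNCATION_ORDER,
        get_finite_difference_tables,
    )

    h = 0.1
    offsets, coeffs = get_finite_difference_tables(stepsize=h)

    # Central offsets for a 5-point stencil and the textbook weights for
    # the first derivative: (1, -8, 0, 8, -1) / (12 h).
    assert offsets[5] == [-2, -1, 0, 1, 2]
    expected = np.array([1.0, -8.0, 0.0, 8.0, -1.0]) / (12.0 * h)
    assert np.allclose(coeffs[(5, 1)], expected)

    # The leading error term of this stencil scales as h**4, which is what
    # truncation_order_from_coeffs detects and TRUNCATION_ORDER records.
    assert TRUNCATION_ORDER[(5, 1)] == 4
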
derivkit/derivatives/fornberg.py
@@ -0,0 +1,245 @@
+ """Implementation of Fornberg's algorithm for numerical derivatives.
+
+ The algorithm was published by Fornberg in:
+ Bengt Fornberg, *Calculation of Weights in Finite Difference Formulas*,
+ SIAM Review, vol. 40, No. 3, pp. 685–691, September 1998.
+
+ Examples:
+ ---------
+ Calculating the derivative at a single value::
+
+     >>> import numpy as np
+     >>> from derivkit.derivatives.fornberg import FornbergDerivative
+     >>> x0 = np.pi/4
+     >>> grid = np.array([-0.3, -0.25, -0.1, 0, 0.12])
+     >>> fornberg = FornbergDerivative(lambda x: np.tan(x), x0)
+     >>> bool(np.isclose(
+     ...     fornberg.differentiate(grid=grid, order=1),
+     ...     2.0022106298738143,
+     ...     rtol=1e-14,
+     ...     atol=0.0,
+     ... ))
+     True
+
+ Calculating the derivative at an array of values using uniform offsets::
+
+     >>> import numpy as np
+     >>> from derivkit.derivatives.fornberg import FornbergDerivative
+     >>> x0 = np.array([
+     ...     [[1, 2],
+     ...      [3, 4]],
+     ...     [[5, 6],
+     ...      [7, 8]]
+     ... ])
+     >>> grid = np.array([-0.34, -0.02, 0.1, 0.34, 0.98])
+     >>> fornberg = FornbergDerivative(lambda x: np.cos(x), x0)
+     >>> np.allclose(
+     ...     fornberg.differentiate(grid=grid, order=1),
+     ...     -np.sin(x0),
+     ...     rtol=1e-4,
+     ...     atol=0.0,
+     ... )
+     True
+
+ Calculating the derivative at an array of values using 5 unique offsets for
+ each evaluation point::
+
+     >>> import numpy as np
+     >>> from derivkit.derivatives.fornberg import FornbergDerivative
+     >>> x0 = np.array([2, 7, 10, -np.pi])
+     >>> grid = np.array([
+     ...     [-0.34, -0.02, 0.1, 0.34, 0.98],
+     ...     [-0.4, -0.2, -0.1, 0.14, 0.68],
+     ...     [-0.5, -0.12, 0.15, 0.64, 0.78],
+     ...     [-0.1, 0, 0.06, 0.24, 0.8]
+     ... ]).T
+     >>> fornberg = FornbergDerivative(lambda x: np.cos(x), x0)
+     >>> np.allclose(
+     ...     fornberg.differentiate(grid=grid, order=1),
+     ...     -np.sin(x0),
+     ...     rtol=1e-4,
+     ...     atol=1e-4,
+     ... )
+     True
+ """
+
+ from __future__ import annotations
+
+ from collections.abc import Callable
+
+ import numpy as np
+ from numpy.typing import NDArray
+
+ from derivkit.utils.types import Array
+
+
+ class FornbergDerivative:
+     """Supplies the Fornberg derivative.
+
+     The Fornberg derivative relies on the interpolation of function values
+     by Lagrange polynomials. For more information see Bengt Fornberg,
+     *Calculation of Weights in Finite Difference Formulas*, SIAM Review,
+     vol. 40, No. 3, pp. 685–691, September 1998.
+
+     Attributes:
+         function: The function to be differentiated. Must accept a single float
+             and return a float, and must be vectorizable.
+         x0: The evaluation points for the derivative. Must be a float or a
+             structure castable to a NumPy array. If it is castable to a NumPy
+             array, the function and its derivative are vectorized over the array.
+     """
+
+     def __init__(
+         self,
+         function: Callable[[np.floating], np.floating],
+         x0: np.float64 | NDArray[np.floating],
+     ) -> None:
+         """Initialises the class.
+
+         Args:
+             function: The function to be differentiated. Must accept a single
+                 float and return a float, and must be vectorizable.
+             x0: The evaluation points for the derivative. Must be a float or a
+                 structure castable to a NumPy array. If it is castable to a
+                 NumPy array, the function and its derivative are vectorized
+                 over the array.
+         """
+         self.function = function
+
+         temp_array = np.asarray(x0)
+         self.original_shape = temp_array.shape
+         self.x0 = np.ravel(temp_array)
+
+     def differentiate(
+         self,
+         *,
+         grid: NDArray[np.float64],
+         order: int = 1,
+     ) -> Array:
+         """Constructs the derivative of a given order of a function at a point.
+
+         The derivative is constructed by recursively differentiating the
+         Lagrange polynomials that approximate the function around the
+         evaluation points.
+
+         Because the derivative of a given order is constructed from the
+         derivatives of lower orders (this is part of the recursion), the
+         method is capable of returning all derivatives up to the specified
+         order. However, currently only the highest derivative is returned.
+
+         See section 3 of Fornberg (1998) for more details.
+
+         Args:
+             grid: An array of offsets relative to the evaluation points. These
+                 points specify the Lagrange interpolation of the function,
+                 and are used to calculate the derivatives. The following
+                 forms are supported:
+
+                 * If ``grid`` is a 1D array, it is assumed to contain
+                   linear offsets from the evaluation points. These
+                   are added uniformly to :data:`FornbergDerivative.x0`.
+
+                 * If ``grid`` is an ND array, it is assumed to contain
+                   unique linear offsets for each evaluation point. In this
+                   case the first axis of the grid must correspond to the
+                   offsets while the remaining axes must match the shape
+                   of :data:`FornbergDerivative.x0`. In brief, the grid must
+                   be of shape ``(n, *x0.shape)`` for some integer ``n``.
+
+                   The advantage of this form is that the offsets can be
+                   tuned for each evaluation point, although each point must
+                   still be given the same number of offsets.
+
+             order: The order of the derivative. Must be a non-negative number.
+                 The case ``order == 0`` corresponds to the Lagrange
+                 interpolation of the function.
+
+         Returns:
+             The derivative of :data:`FornbergDerivative.function` evaluated at
+             :data:`FornbergDerivative.x0`.
+
+         Raises:
+             ValueError: If ``order`` is smaller than ``0``.
+             RuntimeError: If ``grid`` contains duplicate offsets for
+                 a given point.
+         """
+         if order < 0:
+             raise ValueError(
+                 "the maximum derivative order must be at least 0 "
+                 f"(the function itself), but is {order}."
+             )
+
+         if grid.ndim == 1:
+             input_grid = self.x0 + grid[:, np.newaxis]
+         else:
+             input_grid = self.x0 + grid.reshape(grid.shape[0], -1)
+
+         y = self.function(input_grid)
+         weights = np.zeros((*input_grid.shape, order + 1), dtype=np.float64)
+
+         # If the weight recursion fails, the Lagrange polynomial is most
+         # likely not defined for input_grid. This usually means that the grid
+         # has duplicate entries for a given point, so the recursion divides
+         # by zero.
+         try:
+             with np.errstate(divide="raise", invalid="raise"):
+                 # NumPy passes around references to the array data, so the
+                 # weights are updated in place. No assignment is necessary.
+                 self._get_weights(weights, input_grid, order)
+         except FloatingPointError as exc:
+             raise RuntimeError(
+                 "Fornberg derivative failed. "
+                 "Normally this means that the offset grid does not allow for "
+                 "valid Lagrange polynomials. Make sure that the offsets are "
+                 "unique for each point."
+             ) from exc
+
+         # np.dot contracts the last axis of its first argument with the
+         # second-to-last axis of its second argument. The axes are permuted
+         # to ensure that the function values are contracted with the
+         # coefficients corresponding to the ``order``-th order derivative.
+         # TODO: See if this can be made more transparent.
+         derivatives = np.dot(
+             y.T,
+             np.swapaxes(weights, 0, 1)
+         )[np.arange(self.x0.size), np.arange(self.x0.size), -1]
+
+         return derivatives.reshape(self.original_shape)
+
+     def _get_weights(
+         self,
+         weights: NDArray[np.float64],
+         grid: NDArray[np.float64],
+         order: int = 1,
+     ) -> None:
+         """Constructs the weights needed for derivatives up to a given order.
+
+         Args:
+             weights: The coefficients for the differentiated Lagrange
+                 polynomials. The coefficients are updated in place.
+             grid: A series of offsets around the evaluation points.
+             order: The order of the derivative.
+         """
+         c1 = 1.0
+         c4 = grid[0, :] - self.x0
+         weights[0, ..., 0] = 1.0
+
+         for i in range(1, grid.shape[0]):
+             mn = min(i, order)
+             c2 = 1.0
+             c5 = c4
+             c4 = grid[i, :] - self.x0
+
+             for j in range(i):
+                 c3 = grid[i, ...] - grid[j, ...]
+                 c2 *= c3
+
+                 if i == j + 1:
+                     for k in range(mn, 0, -1):
+                         weights[i, ..., k] = c1 * (
+                             k * weights[i-1, ..., k-1]
+                             - c5 * weights[i-1, ..., k]
+                         ) / c2
+                     weights[i, ..., 0] = -c1 * c5 * weights[i-1, ..., 0] / c2
+
+                 for k in range(mn, 0, -1):
+                     weights[j, ..., k] = (c4 * weights[j, ..., k] - k * weights[j, ..., k-1]) / c3
+                 weights[j, ..., 0] = c4 * weights[j, ..., 0] / c3
+
+             c1 = c2
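
A small sketch of the 1D-grid branch of ``differentiate`` for a higher-order derivative, using the class exactly as defined above (the tolerance is illustrative):

    import numpy as np

    from derivkit.derivatives.fornberg import FornbergDerivative

    # Second derivative of exp at several points, with the same seven
    # offsets reused for every evaluation point (the 1D-grid case).
    x0 = np.array([0.0, 0.5, 1.0])
    grid = np.linspace(-0.3, 0.3, 7)   # offsets relative to each x0

    fornberg = FornbergDerivative(np.exp, x0)
    second = fornberg.differentiate(grid=grid, order=2)

    # Every derivative of exp equals the function itself.
    assert np.allclose(second, np.exp(x0), rtol=1e-4)
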
derivkit/derivatives/local_polynomial_derivative/__init__.py
@@ -0,0 +1 @@
+ """Local polynomial derivative init."""