derivkit 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. derivkit/__init__.py +22 -0
  2. derivkit/calculus/__init__.py +17 -0
  3. derivkit/calculus/calculus_core.py +152 -0
  4. derivkit/calculus/gradient.py +97 -0
  5. derivkit/calculus/hessian.py +528 -0
  6. derivkit/calculus/hyper_hessian.py +296 -0
  7. derivkit/calculus/jacobian.py +156 -0
  8. derivkit/calculus_kit.py +128 -0
  9. derivkit/derivative_kit.py +315 -0
  10. derivkit/derivatives/__init__.py +6 -0
  11. derivkit/derivatives/adaptive/__init__.py +5 -0
  12. derivkit/derivatives/adaptive/adaptive_fit.py +238 -0
  13. derivkit/derivatives/adaptive/batch_eval.py +179 -0
  14. derivkit/derivatives/adaptive/diagnostics.py +325 -0
  15. derivkit/derivatives/adaptive/grid.py +333 -0
  16. derivkit/derivatives/adaptive/polyfit_utils.py +513 -0
  17. derivkit/derivatives/adaptive/spacing.py +66 -0
  18. derivkit/derivatives/adaptive/transforms.py +245 -0
  19. derivkit/derivatives/autodiff/__init__.py +1 -0
  20. derivkit/derivatives/autodiff/jax_autodiff.py +95 -0
  21. derivkit/derivatives/autodiff/jax_core.py +217 -0
  22. derivkit/derivatives/autodiff/jax_utils.py +146 -0
  23. derivkit/derivatives/finite/__init__.py +5 -0
  24. derivkit/derivatives/finite/batch_eval.py +91 -0
  25. derivkit/derivatives/finite/core.py +84 -0
  26. derivkit/derivatives/finite/extrapolators.py +511 -0
  27. derivkit/derivatives/finite/finite_difference.py +247 -0
  28. derivkit/derivatives/finite/stencil.py +206 -0
  29. derivkit/derivatives/fornberg.py +245 -0
  30. derivkit/derivatives/local_polynomial_derivative/__init__.py +1 -0
  31. derivkit/derivatives/local_polynomial_derivative/diagnostics.py +90 -0
  32. derivkit/derivatives/local_polynomial_derivative/fit.py +199 -0
  33. derivkit/derivatives/local_polynomial_derivative/local_poly_config.py +95 -0
  34. derivkit/derivatives/local_polynomial_derivative/local_polynomial_derivative.py +205 -0
  35. derivkit/derivatives/local_polynomial_derivative/sampling.py +72 -0
  36. derivkit/derivatives/tabulated_model/__init__.py +1 -0
  37. derivkit/derivatives/tabulated_model/one_d.py +247 -0
  38. derivkit/forecast_kit.py +783 -0
  39. derivkit/forecasting/__init__.py +1 -0
  40. derivkit/forecasting/dali.py +78 -0
  41. derivkit/forecasting/expansions.py +486 -0
  42. derivkit/forecasting/fisher.py +298 -0
  43. derivkit/forecasting/fisher_gaussian.py +171 -0
  44. derivkit/forecasting/fisher_xy.py +357 -0
  45. derivkit/forecasting/forecast_core.py +313 -0
  46. derivkit/forecasting/getdist_dali_samples.py +429 -0
  47. derivkit/forecasting/getdist_fisher_samples.py +235 -0
  48. derivkit/forecasting/laplace.py +259 -0
  49. derivkit/forecasting/priors_core.py +860 -0
  50. derivkit/forecasting/sampling_utils.py +388 -0
  51. derivkit/likelihood_kit.py +114 -0
  52. derivkit/likelihoods/__init__.py +1 -0
  53. derivkit/likelihoods/gaussian.py +136 -0
  54. derivkit/likelihoods/poisson.py +176 -0
  55. derivkit/utils/__init__.py +13 -0
  56. derivkit/utils/concurrency.py +213 -0
  57. derivkit/utils/extrapolation.py +254 -0
  58. derivkit/utils/linalg.py +513 -0
  59. derivkit/utils/logger.py +26 -0
  60. derivkit/utils/numerics.py +262 -0
  61. derivkit/utils/sandbox.py +74 -0
  62. derivkit/utils/types.py +15 -0
  63. derivkit/utils/validate.py +811 -0
  64. derivkit-1.0.0.dist-info/METADATA +50 -0
  65. derivkit-1.0.0.dist-info/RECORD +68 -0
  66. derivkit-1.0.0.dist-info/WHEEL +5 -0
  67. derivkit-1.0.0.dist-info/licenses/LICENSE +21 -0
  68. derivkit-1.0.0.dist-info/top_level.txt +1 -0
derivkit/utils/linalg.py
@@ -0,0 +1,513 @@
+ """This module provides small linear-algebra helpers.
+
+ The main features are:
+
+ 1) Diagnostics: warn about non-symmetric inputs, ill-conditioning, and rank issues,
+    and choose a safe fallback when a fast path fails.
+
+ 2) Canonicalization: accept covariance inputs in multiple forms (scalar, 1D diagonal
+    vector, or 2D matrix) and convert them into a consistent 2D array with validated
+    shape and finite values. In other words, we normalize the input representation
+    so downstream code always receives a well-formed (k x k) array.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Mapping
+
+ import numpy as np
+ from numpy.typing import NDArray
+
+ from derivkit.utils.logger import derivkit_logger
+ from derivkit.utils.validate import validate_covariance_matrix_shape
+
+ CovSpec = NDArray[np.float64] | Mapping[str, Any]
+
+ __all__ = [
+     "invert_covariance",
+     "normalize_covariance",
+     "solve_or_pinv",
+     "symmetrize_matrix",
+     "make_spd_by_jitter",
+     "split_xy_covariance",
+     "as_1d_data_vector",
+ ]
+
+
+ def invert_covariance(
+     cov: np.ndarray,
+     *,
+     rcond: float = 1e-12,
+     warn_prefix: str = "",
+ ) -> np.ndarray:
+     """Return the inverse covariance with diagnostics; fall back to pseudoinverse when needed.
+
+     This helper accepts a scalar (0D), a diagonal variance vector (1D), or a full
+     covariance matrix (2D). Inputs are canonicalized to a 2D array before inversion.
+     The function warns (but does not modify data) if the matrix is non-symmetric,
+     warns on ill-conditioning, and uses a pseudoinverse when inversion is not viable.
+
+     Args:
+         cov: Covariance (scalar, diagonal vector, or full 2D matrix).
+         rcond: Cutoff for small singular values used by ``np.linalg.pinv``.
+         warn_prefix: Optional prefix included in warnings (e.g., a class or function name).
+
+     Returns:
+         A 2D NumPy array containing the inverse covariance.
+
+     Raises:
+         ValueError: If ``cov`` has more than 2 dimensions or is not square when 2D.
+     """
+     cov = np.asarray(cov, dtype=float)
+
+     # Canonicalize to 2D
+     if cov.ndim == 0:
+         cov = cov.reshape(1, 1)
+     elif cov.ndim == 1:
+         cov = np.diag(cov)
+     elif cov.ndim != 2:
+         raise ValueError(f"`cov` must be 0D, 1D, or 2D; got ndim={cov.ndim}.")
+
+     if cov.ndim == 2 and cov.shape[0] != cov.shape[1]:
+         raise ValueError(f"`cov` must be square; got shape={cov.shape}.")
+
+     prefix = f"[{warn_prefix}] " if warn_prefix else ""
+
+     # Symmetry check (warn only; do not symmetrize)
+     symmetric = np.allclose(cov, cov.T, rtol=1e-12, atol=1e-12)
+     if not symmetric:
+         derivkit_logger.warning(
+             f"{prefix}`cov` is not symmetric; proceeding as-is."
+         )
+
+     n = cov.shape[0]
+
+     # Ill-conditioning warning
+     try:
+         cond_val = np.linalg.cond(cov)
+         if (not np.isfinite(cond_val)) or (cond_val > 1.0 / rcond):
+             derivkit_logger.warning(
+                 f"{prefix}`cov` is ill-conditioned (cond≈{cond_val:.2e}); "
+                 "results may be unstable."
+             )
+     except np.linalg.LinAlgError:
+         pass
+
+     # Rank check
+     try:
+         rank = np.linalg.matrix_rank(cov)
+     except np.linalg.LinAlgError:
+         rank = n
+
+     # Try exact inverse when full rank; otherwise pseudoinverse
+     if rank == n:
+         try:
+             inv = np.linalg.inv(cov)
+             return np.asarray(inv, dtype=float)
+         except np.linalg.LinAlgError:
+             pass  # fall through to pinv
+
+     # Pseudoinverse path; hermitian mirrors the symmetry check above
+     derivkit_logger.warning(
+         f"{prefix}`cov` inversion failed; "
+         "falling back to pseudoinverse "
+         f"(rcond={rcond})."
+     )
+     inv_cov = np.linalg.pinv(cov, rcond=rcond, hermitian=symmetric).astype(float, copy=False)
+     return inv_cov
+
+
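A minimal usage sketch of ``invert_covariance`` (illustrative only; it assumes the module is importable as ``derivkit.utils.linalg``, per the file path in the list above):

    import numpy as np
    from derivkit.utils.linalg import invert_covariance

    # Scalar, diagonal-vector, and full-matrix inputs all return a 2D inverse.
    inv_scalar = invert_covariance(4.0)                    # [[0.25]]
    inv_diag = invert_covariance(np.array([1.0, 4.0]))     # diag(1.0, 0.25)
    inv_full = invert_covariance(np.array([[2.0, 0.5],
                                           [0.5, 1.0]]))

    # A singular matrix falls back to the (warned) pseudoinverse path.
    inv_singular = invert_covariance(np.ones((2, 2)), warn_prefix="demo")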
+ def normalize_covariance(
+     cov: Any,
+     n_parameters: int,
+     *,
+     asym_atol: float = 1e-12,
+ ) -> NDArray[np.float64]:
+     """Return a canonicalized covariance matrix.
+
+     Accepts a scalar (0D), a diagonal variance vector (1D), or a full covariance
+     matrix (2D). Validates shapes and finiteness, symmetrizes full matrices,
+     and returns a 2D array of shape ``(k, k)``.
+
+     Args:
+         cov: Covariance (scalar, diagonal vector, or full 2D matrix).
+         n_parameters: Expected size of the covariance (number of parameters).
+         asym_atol: Absolute tolerance for the symmetry check of full matrices.
+
+     Returns:
+         A 2D NumPy array containing the canonicalized covariance matrix.
+
+     Raises:
+         ValueError: If ``cov`` has an invalid shape, contains non-finite values,
+             or is too asymmetric.
+     """
+     arr = np.asarray(cov, dtype=float)
+
+     # Scalar: common variance on the diagonal
+     if arr.ndim == 0:
+         if not np.isfinite(arr):
+             raise ValueError("cov scalar must be finite.")
+         return np.eye(n_parameters, dtype=float) * float(arr)
+
+     # 1D: diagonal of the covariance
+     if arr.ndim == 1:
+         if arr.shape[0] != n_parameters:
+             raise ValueError(f"cov vector length {arr.shape[0]} != k={n_parameters}.")
+         if not np.all(np.isfinite(arr)):
+             raise ValueError("cov diagonal contains non-finite values.")
+         return np.diag(arr)
+
+     # 2D: full covariance matrix
+     if arr.ndim == 2:
+         if arr.shape != (n_parameters, n_parameters):
+             raise ValueError(f"cov shape {arr.shape} != ({n_parameters},{n_parameters}).")
+         if not np.all(np.isfinite(arr)):
+             raise ValueError("cov matrix contains non-finite values.")
+         a = arr.astype(float, copy=False)
+         skew = a - a.T
+         fro = np.linalg.norm(a)
+         skew_fro = np.linalg.norm(skew)
+         thresh = asym_atol * (fro if fro > 0.0 else 1.0)
+         if skew_fro > thresh:
+             raise ValueError(
+                 f"cov matrix too asymmetric (‖A-A^T‖_F={skew_fro:.2e} > {thresh:.2e})."
+             )
+         return 0.5 * (a + a.T)
+
+     raise ValueError("cov must be scalar, 1D diag vector, or 2D (k,k) matrix.")
+
+
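A short sketch of the three accepted input forms described above (illustrative; same assumed import path):

    import numpy as np
    from derivkit.utils.linalg import normalize_covariance

    c0 = normalize_covariance(0.5, n_parameters=3)                        # 0.5 * identity(3)
    c1 = normalize_covariance(np.array([1.0, 2.0, 3.0]), n_parameters=3)  # diagonal matrix
    m = np.array([[2.0, 0.3], [0.3 + 1e-15, 1.0]])                        # tiny asymmetry
    c2 = normalize_covariance(m, n_parameters=2)                          # symmetrized copy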
+ def solve_or_pinv(
+     matrix: np.ndarray,
+     vector: np.ndarray,
+     *,
+     rcond: float = 1e-12,
+     assume_symmetric: bool = True,
+     warn_context: str = "linear solve",
+ ) -> np.ndarray:
+     """Solve ``matrix @ x = vector`` with pseudoinverse fallback.
+
+     If ``assume_symmetric`` is True (e.g., Fisher matrices), attempt a
+     Cholesky-based solve. If the matrix is not symmetric positive definite
+     or is singular, emit a warning and fall back to
+     ``np.linalg.pinv(matrix, rcond) @ vector``.
+
+     Args:
+         matrix: Coefficient matrix of shape ``(n, n)``.
+         vector: Right-hand side vector or matrix of shape ``(n,)`` or ``(n, k)``.
+         rcond: Cutoff for small singular values used by ``np.linalg.pinv``.
+         assume_symmetric: If ``True``, prefer a Cholesky solve
+             (fast path for symmetric positive definite (SPD)/Hermitian matrices).
+         warn_context: Short label included in the warning message.
+
+     Returns:
+         Solution array ``x`` with shape matching ``vector`` (``(n,)`` or ``(n, k)``).
+
+     Raises:
+         ValueError: If the shapes of ``matrix`` and ``vector`` are incompatible.
+     """
+     matrix = np.asarray(matrix, dtype=float)
+     vector = np.asarray(vector, dtype=float)
+
+     # Shape checks
+     if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
+         raise ValueError(f"matrix must be square 2D; got shape {matrix.shape}.")
+     n = matrix.shape[0]
+     if vector.ndim not in (1, 2) or vector.shape[0] != n:
+         raise ValueError(f"vector must have shape (n,) or (n,k) with n={n}; got {vector.shape}.")
+
+     # Rank-deficient shortcut (ensures the rank-deficient case emits a warning)
+     try:
+         rank = np.linalg.matrix_rank(matrix)
+     except np.linalg.LinAlgError:
+         rank = n
+     if rank < n:
+         derivkit_logger.warning(
+             f"In {warn_context}, matrix is rank-deficient (rank={rank} < {n}); "
+             f"falling back to pseudoinverse with rcond={rcond}."
+         )
+         hermitian = np.allclose(matrix, matrix.T, rtol=1e-12, atol=1e-12)
+         return (np.linalg.pinv(matrix, rcond=rcond, hermitian=hermitian) @ vector).astype(float, copy=False)
+
+     # Fast path: symmetric/Hermitian (Cholesky) or general solve
+     try:
+         if assume_symmetric:
+             l_factor = np.linalg.cholesky(matrix)
+             y = np.linalg.solve(l_factor, vector)
+             return np.linalg.solve(l_factor.T, y)
+         else:
+             return np.linalg.solve(matrix, vector)
+     except np.linalg.LinAlgError:
+         cond_msg = ""
+         try:
+             cond_val = np.linalg.cond(matrix)
+             if np.isfinite(cond_val):
+                 cond_msg = f" (cond≈{cond_val:.2e})"
+         except np.linalg.LinAlgError:
+             pass
+
+         derivkit_logger.warning(
+             f"In {warn_context}, the matrix was not SPD or was singular; "
+             f"falling back to pseudoinverse with rcond={rcond}{cond_msg}."
+         )
+         hermitian = np.allclose(matrix, matrix.T, rtol=1e-12, atol=1e-12)
+         return (np.linalg.pinv(matrix, rcond=rcond, hermitian=hermitian) @ vector).astype(float, copy=False)
+
+
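A sketch of the two code paths (Cholesky fast path versus pseudoinverse fallback), using hypothetical example matrices and the same assumed import path:

    import numpy as np
    from derivkit.utils.linalg import solve_or_pinv

    fisher = np.array([[4.0, 1.0], [1.0, 3.0]])   # SPD: solved via the Cholesky path
    rhs = np.array([1.0, 2.0])
    x = solve_or_pinv(fisher, rhs, warn_context="Fisher solve")

    singular = np.ones((2, 2))                    # rank-deficient: warned pinv path
    x_ls = solve_or_pinv(singular, rhs, warn_context="degenerate solve")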
+ def symmetrize_matrix(a: Any) -> NDArray[np.float64]:
+     """Symmetrizes a square matrix.
+
+     Args:
+         a: Array-like square matrix.
+
+     Returns:
+         Symmetric 2D float64 NumPy array.
+
+     Raises:
+         ValueError: If input is not a square 2D array.
+     """
+     m = np.asarray(a, dtype=float)
+     if m.ndim != 2 or m.shape[0] != m.shape[1]:
+         raise ValueError(f"matrix must be square 2D; got shape {m.shape}.")
+     return (0.5 * (m + m.T)).astype(np.float64, copy=False)
+
+
+ def make_spd_by_jitter(
+     matrix: Any,
+     *,
+     max_tries: int = 12,
+     jitter_scale: float = 1e-10,
+     jitter_floor: float = 1e-12,
+ ) -> tuple[NDArray[np.float64], float]:
+     """Makes a symmetric matrix SPD by adding diagonal jitter if necessary.
+
+     Args:
+         matrix: Array-like square matrix.
+         max_tries: Maximum number of jitter attempts (powers of 10).
+         jitter_scale: Scale factor for jitter based on the mean diagonal.
+         jitter_floor: Minimum jitter to add.
+
+     Returns:
+         A tuple ``(spd_matrix, jitter_added)``, where ``spd_matrix`` is the SPD
+         matrix and ``jitter_added`` is the amount of jitter added to the diagonal.
+
+     Raises:
+         np.linalg.LinAlgError: If the matrix cannot be made SPD within ``max_tries``.
+     """
+     h = symmetrize_matrix(matrix)
+     n = h.shape[0]
+
+     # Attempt without jitter first
+     try:
+         np.linalg.cholesky(h)
+         return h, 0.0
+     except np.linalg.LinAlgError:
+         pass
+
+     diag_mean = float(np.mean(np.diag(h))) if n else 1.0
+     if not np.isfinite(diag_mean) or diag_mean == 0.0:
+         diag_mean = 1.0
+
+     base = jitter_scale * abs(diag_mean) + jitter_floor
+     eye = np.eye(n, dtype=np.float64)
+
+     jitter = 0.0
+     for k in range(max_tries):
+         jitter = base * (10.0 ** k)
+         h_try = h + jitter * eye
+         try:
+             np.linalg.cholesky(h_try)
+             return h_try, float(jitter)
+         except np.linalg.LinAlgError:
+             continue
+
+     evals = np.linalg.eigvalsh(h)
+     min_eig = float(np.min(evals)) if evals.size else 0.0
+     raise np.linalg.LinAlgError(
+         "Matrix was not SPD and could not be regularized with diagonal jitter "
+         f"(min_eig={min_eig:.2e}, last_jitter={jitter:.2e})."
+     )
+
+
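A sketch of the jitter behaviour on a symmetric matrix with a slightly negative eigenvalue (illustrative values; same assumed import path):

    import numpy as np
    from derivkit.utils.linalg import make_spd_by_jitter

    h = np.array([[1.0, 1.0 + 1e-8],
                  [1.0 + 1e-8, 1.0]])      # min eigenvalue ~ -1e-8, so not SPD
    h_spd, jitter = make_spd_by_jitter(h)  # small positive jitter added to the diagonal
    # jitter == 0.0 would indicate the matrix was already SPD.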
+ def split_xy_covariance(
+     cov: CovSpec,
+     *,
+     nx: int,
+     atol_sym: float = 1e-12,
+     rtol_sym: float = 1e-8,
+ ) -> tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]:
+     """Validates and splits a stacked covariance for the concatenated vector ``[x, y]``.
+
+     This function enforces the convention that the full covariance corresponds to a
+     stacked data vector ordered as ``[x, y]``, where ``x`` has length ``nx`` and
+     ``y`` has length ``n - nx``. It returns the covariance blocks ``(Cxx, Cxy, Cyy)``
+     and raises informative errors if the input is not consistent with this convention.
+
+     The input may be provided directly as a 2D covariance matrix or as a dict-like
+     object that includes the covariance and optional metadata for enforcing or
+     reordering the ``[x, y]`` convention.
+
+     Args:
+         cov: Full covariance for the stacked vector ``[x, y]``.
+             Supported forms are:
+
+             * A 2D covariance matrix with shape ``(nx+ny, nx+ny)``
+               acting on the stacked measurement vector ``[x, y]``, where ``x``
+               (length ``nx``) denotes the input components and ``y`` (length ``ny``)
+               denotes the output components. The covariance is assumed to be
+               ordered with ``x`` first, followed by ``y``.
+             * A dict-like object with key ``"cov"`` containing the 2D array.
+               The dict may include:
+
+               * ``"x_idx"`` and ``"y_idx"``: integer index arrays used to reorder an
+                 arbitrary covariance into ``[x, y]`` order before splitting.
+
+         nx: Number of input components in ``x`` (length of ``x`` in the stacked vector).
+         atol_sym: Absolute tolerance used for symmetry and cross-block consistency
+             checks.
+         rtol_sym: Relative tolerance used for symmetry and cross-block consistency
+             checks.
+
+     Returns:
+         A tuple ``(cxx, cxy, cyy)`` where:
+
+         * ``cxx`` has shape ``(nx, nx)``.
+         * ``cxy`` has shape ``(nx, ny)``.
+         * ``cyy`` has shape ``(ny, ny)``.
+
+         Here ``ny = n - nx``.
+
+     Raises:
+         ValueError: If ``cov`` is not a valid square covariance matrix, contains
+             non-finite values, is not symmetric within tolerance, cannot be split
+             using ``nx``, or if the cross-blocks are inconsistent with the ``[x, y]``
+             stacking convention. Also raised if a dict-like object provides only one
+             of the ``"x_idx"``/``"y_idx"`` keys.
+     """
+     if isinstance(cov, Mapping):
+         spec = cov
+         cov_arr = np.asarray(spec["cov"], dtype=np.float64)
+
+         if ("x_idx" in spec) or ("y_idx" in spec):
+             if ("x_idx" not in spec) or ("y_idx" not in spec):
+                 raise ValueError(
+                     "If using indices, you must provide both 'x_idx' and 'y_idx'."
+                 )
+             x_idx = np.asarray(spec["x_idx"], dtype=np.int64)
+             y_idx = np.asarray(spec["y_idx"], dtype=np.int64)
+             cov_arr = _reorder_cov_to_xy(cov_arr, x_idx=x_idx, y_idx=y_idx)
+
+     else:
+         cov_arr = np.asarray(cov, dtype=np.float64)
+
+     validate_covariance_matrix_shape(cov_arr)
+
+     if not np.all(np.isfinite(cov_arr)):
+         raise ValueError("cov must contain only finite values.")
+
+     if not np.allclose(cov_arr, cov_arr.T, atol=atol_sym, rtol=rtol_sym):
+         max_asym = float(np.max(np.abs(cov_arr - cov_arr.T)))
+         raise ValueError(
+             "cov must be symmetric within tolerance. "
+             f"max|cov-cov.T|={max_asym:g} (atol={atol_sym:g}, rtol={rtol_sym:g})."
+         )
+
+     n = int(cov_arr.shape[0])
+     if not (0 < nx < n):
+         raise ValueError(
+             f"nx must satisfy 0 < nx < cov.shape[0]; got nx={nx}, cov.shape[0]={n}."
+         )
+
+     ny = n - nx
+
+     cxx = cov_arr[:nx, :nx]
+     cxy = cov_arr[:nx, nx:]
+     cyy = cov_arr[nx:, nx:]
+
+     if cxx.shape != (nx, nx):
+         raise ValueError(f"cxx must have shape ({nx},{nx}); got {cxx.shape}.")
+     if cxy.shape != (nx, ny):
+         raise ValueError(f"cxy must have shape ({nx},{ny}); got {cxy.shape}.")
+     if cyy.shape != (ny, ny):
+         raise ValueError(f"cyy must have shape ({ny},{ny}); got {cyy.shape}.")
+
+     cyx = cov_arr[nx:, :nx]
+     if not np.allclose(cxy, cyx.T, atol=atol_sym, rtol=rtol_sym):
+         max_cross = float(np.max(np.abs(cxy - cyx.T)))
+         raise ValueError(
+             "Cross-covariance blocks inconsistent with [x,y] stacking: "
+             "expected cov[:nx,nx:] == cov[nx:,:nx].T within tolerance. "
+             f"max diff={max_cross:g} (atol={atol_sym:g}, rtol={rtol_sym:g})."
+         )
+
+     return cxx, cxy, cyy
+
+
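A sketch of both input forms, following the ``[x, y]`` stacking convention documented above (illustrative values; same assumed import path):

    import numpy as np
    from derivkit.utils.linalg import split_xy_covariance

    cov = np.array([[1.0, 0.2, 0.1],
                    [0.2, 2.0, 0.3],
                    [0.1, 0.3, 3.0]])
    cxx, cxy, cyy = split_xy_covariance(cov, nx=1)   # shapes (1,1), (1,2), (2,2)

    # Dict form: reorder an arbitrarily ordered covariance into [x, y] first.
    spec = {"cov": cov, "x_idx": [2], "y_idx": [0, 1]}
    cxx2, cxy2, cyy2 = split_xy_covariance(spec, nx=1)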
+ def _reorder_cov_to_xy(
+     cov: NDArray[np.float64],
+     *,
+     x_idx: NDArray[np.int64],
+     y_idx: NDArray[np.int64],
+ ) -> NDArray[np.float64]:
+     """Reorders the subspaces of a full covariance matrix.
+
+     This helper reindexes the input matrix so that the returned matrix has the
+     block structure of the stacked vector ``[x, y]``: the first block corresponds
+     to indices ``x_idx`` and the second block to indices ``y_idx``. It is intended
+     to support cases where the original covariance uses a different ordering than
+     the required ``[x, y]`` stacking convention.
+
+     Args:
+         cov: Full 2D covariance matrix to reorder.
+         x_idx: Integer indices selecting the x components in the original ordering.
+         y_idx: Integer indices selecting the y components in the original ordering.
+
+     Returns:
+         A reordered covariance matrix with the same shape as ``cov``, where rows and
+         columns are permuted so the stacked order is ``[x, y]``.
+
+     Raises:
+         ValueError: If ``cov`` is not a square 2D matrix, or if the indices are not
+             1D, are out of range, overlap, or do not cover all covariance dimensions
+             exactly once.
+     """
+     cov = np.asarray(cov, dtype=np.float64)
+     validate_covariance_matrix_shape(cov)
+
+     x_idx = np.asarray(x_idx, dtype=np.int64).ravel()
+     y_idx = np.asarray(y_idx, dtype=np.int64).ravel()
+
+     idx = np.concatenate([x_idx, y_idx])
+     n = int(cov.shape[0])
+
+     if idx.size != n:
+         raise ValueError(
+             "x_idx and y_idx must partition cov dimension exactly: "
+             f"len(x_idx)+len(y_idx)={idx.size} vs cov.shape[0]={n}."
+         )
+     if idx.min(initial=0) < 0 or idx.max(initial=0) >= n:
+         raise ValueError("x_idx/y_idx contain out-of-range indices.")
+     if np.unique(idx).size != n:
+         raise ValueError(
+             "x_idx and y_idx must be disjoint and cover all indices exactly once."
+         )
+
+     return cov[np.ix_(idx, idx)]
+
+
+ def as_1d_data_vector(y: NDArray[np.float64] | float) -> NDArray[np.float64]:
+     """Converts a model output into a 1D data vector.
+
+     This function standardizes model outputs so downstream code can treat them as a
+     single data vector. Scalars are converted to length-1 arrays. Array outputs are
+     returned as 1D arrays, flattening higher-rank inputs in row-major ("C") order.
+
+     Args:
+         y: Model output to convert. May be a scalar or an array-like object of any
+             shape.
+
+     Returns:
+         A 1D float64 NumPy array representing the model output as a single data
+         vector.
+     """
+     arr = np.asarray(y, dtype=np.float64)
+
+     if arr.ndim == 0:
+         return arr[None]
+     if arr.ndim == 1:
+         return arr
+     return arr.ravel(order="C")
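A sketch of the flattening rules (illustrative; same assumed import path):

    import numpy as np
    from derivkit.utils.linalg import as_1d_data_vector

    as_1d_data_vector(3.0).shape              # (1,)
    as_1d_data_vector([1.0, 2.0]).shape       # (2,)
    as_1d_data_vector(np.ones((2, 3))).shape  # (6,), row-major flattening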
derivkit/utils/logger.py
@@ -0,0 +1,26 @@
+ """Provides the shared logger for DerivKit modules.
+
+ ``derivkit`` uses a simple logging system based on the
+ `Logging <https://docs.python.org/3/library/logging.html>`__ standard library.
+ Logging messages are grouped into levels:
+
+ * ``INFO``: An indication that things are working as expected.
+ * ``WARNING``: An indication that something unexpected
+   happened which may require attention.
+
+ By default, only messages of level ``WARNING`` and above are displayed.
+
+ Calling applications can configure the format and log level of the displayed messages
+ by `Configuring Logging <https://docs.python.org/3/howto/logging.html#configuring-logging>`__
+ for ``derivkit.utils.logger.derivkit_logger``, e.g.::
+
+     >>> import logging
+     >>> logging.basicConfig(
+     ...     level=logging.INFO,
+     ...     format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
+     ... )
+ """
+ import logging
+
+ logger_name = "derivkit"
+ derivkit_logger = logging.getLogger(logger_name)
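Building on the docstring above, a minimal sketch that keeps the default handler setup but enables ``INFO`` output only for the ``derivkit`` logger (standard-library ``logging`` only):

    import logging

    # Install a handler and format on the root logger.
    logging.basicConfig(
        format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
    )
    # Raise verbosity for derivkit messages without affecting other libraries.
    logging.getLogger("derivkit").setLevel(logging.INFO)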