fluxfem 0.1.4__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. fluxfem/__init__.py +69 -13
  2. fluxfem/core/__init__.py +140 -53
  3. fluxfem/core/assembly.py +691 -97
  4. fluxfem/core/basis.py +75 -54
  5. fluxfem/core/context_types.py +36 -12
  6. fluxfem/core/dtypes.py +9 -1
  7. fluxfem/core/forms.py +10 -0
  8. fluxfem/core/mixed_assembly.py +263 -0
  9. fluxfem/core/mixed_space.py +382 -0
  10. fluxfem/core/mixed_weakform.py +97 -0
  11. fluxfem/core/solver.py +2 -0
  12. fluxfem/core/space.py +315 -30
  13. fluxfem/core/weakform.py +821 -42
  14. fluxfem/helpers_wf.py +49 -0
  15. fluxfem/mesh/__init__.py +54 -2
  16. fluxfem/mesh/base.py +318 -9
  17. fluxfem/mesh/contact.py +841 -0
  18. fluxfem/mesh/dtypes.py +12 -0
  19. fluxfem/mesh/hex.py +17 -16
  20. fluxfem/mesh/io.py +9 -6
  21. fluxfem/mesh/mortar.py +3970 -0
  22. fluxfem/mesh/supermesh.py +318 -0
  23. fluxfem/mesh/surface.py +104 -26
  24. fluxfem/mesh/tet.py +16 -7
  25. fluxfem/physics/diffusion.py +3 -0
  26. fluxfem/physics/elasticity/hyperelastic.py +35 -3
  27. fluxfem/physics/elasticity/linear.py +22 -4
  28. fluxfem/physics/elasticity/stress.py +9 -5
  29. fluxfem/physics/operators.py +12 -5
  30. fluxfem/physics/postprocess.py +29 -3
  31. fluxfem/solver/__init__.py +47 -2
  32. fluxfem/solver/bc.py +38 -2
  33. fluxfem/solver/block_matrix.py +284 -0
  34. fluxfem/solver/block_system.py +477 -0
  35. fluxfem/solver/cg.py +150 -55
  36. fluxfem/solver/dirichlet.py +358 -5
  37. fluxfem/solver/history.py +15 -3
  38. fluxfem/solver/newton.py +260 -70
  39. fluxfem/solver/petsc.py +445 -0
  40. fluxfem/solver/preconditioner.py +109 -0
  41. fluxfem/solver/result.py +18 -0
  42. fluxfem/solver/solve_runner.py +208 -23
  43. fluxfem/solver/solver.py +35 -12
  44. fluxfem/solver/sparse.py +149 -15
  45. fluxfem/tools/jit.py +19 -7
  46. fluxfem/tools/timer.py +14 -12
  47. fluxfem/tools/visualizer.py +16 -4
  48. fluxfem-0.2.1.dist-info/METADATA +314 -0
  49. fluxfem-0.2.1.dist-info/RECORD +59 -0
  50. fluxfem-0.1.4.dist-info/METADATA +0 -127
  51. fluxfem-0.1.4.dist-info/RECORD +0 -48
  52. {fluxfem-0.1.4.dist-info → fluxfem-0.2.1.dist-info}/LICENSE +0 -0
  53. {fluxfem-0.1.4.dist-info → fluxfem-0.2.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,445 @@
1
+ from __future__ import annotations
2
+
3
+ import numpy as np
4
+ import time
5
+ import warnings
6
+ from typing import Any, Callable, TypeAlias
7
+
8
+ try:
9
+ import scipy.sparse as sp
10
+ except Exception: # pragma: no cover
11
+ sp = None
12
+
13
+ from .sparse import FluxSparseMatrix
14
+
15
+ ArrayLike: TypeAlias = np.ndarray
16
+ MatVec: TypeAlias = Callable[[np.ndarray], np.ndarray]
17
+ SolveInfo: TypeAlias = dict[str, Any]
18
+
19
+
20
def petsc_is_available() -> bool:
    """Return True when petsc4py can be imported in this environment."""
    try:
        import petsc4py  # noqa: F401
    except Exception:
        return False
    return True
26
+
27
+
28
+ def _require_petsc4py():
29
+ try:
30
+ import petsc4py
31
+ petsc4py.init([])
32
+ from petsc4py import PETSc
33
+ return PETSc
34
+ except Exception as exc: # pragma: no cover
35
+ raise ImportError("petsc4py is required for PETSc solves. Install with the petsc extra.") from exc
36
+
37
+
38
+ def _coo_to_csr(
39
+ rows: ArrayLike, cols: ArrayLike, data: ArrayLike, n_dofs: int
40
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
41
+ r = np.asarray(rows, dtype=np.int64)
42
+ c = np.asarray(cols, dtype=np.int64)
43
+ d = np.asarray(data)
44
+ if r.size == 0:
45
+ indptr = np.zeros(n_dofs + 1, dtype=np.int32)
46
+ indices = np.zeros(0, dtype=np.int32)
47
+ return indptr, indices, d
48
+ order = np.lexsort((c, r))
49
+ r_s = r[order]
50
+ c_s = c[order]
51
+ d_s = d[order]
52
+ new_group = np.ones(r_s.size, dtype=bool)
53
+ new_group[1:] = (r_s[1:] != r_s[:-1]) | (c_s[1:] != c_s[:-1])
54
+ starts = np.nonzero(new_group)[0]
55
+ r_u = r_s[starts]
56
+ c_u = c_s[starts]
57
+ d_u = np.add.reduceat(d_s, starts)
58
+ indptr = np.zeros(n_dofs + 1, dtype=np.int32)
59
+ np.add.at(indptr, r_u + 1, 1)
60
+ indptr = np.cumsum(indptr, dtype=np.int32)
61
+ return indptr, c_u.astype(np.int32), d_u
62
+
63
+
64
+ def _infer_n_dofs(K: Any, F: Any | None, n_dofs: int | None) -> int:
65
+ if n_dofs is not None:
66
+ return int(n_dofs)
67
+ if hasattr(K, "n_dofs"):
68
+ return int(getattr(K, "n_dofs"))
69
+ if hasattr(K, "shape"):
70
+ shape = getattr(K, "shape")
71
+ if shape is not None:
72
+ return int(shape[0])
73
+ if F is not None:
74
+ F_arr = np.asarray(F)
75
+ if F_arr.ndim >= 1:
76
+ return int(F_arr.shape[0])
77
+ raise ValueError("n_dofs is required when operator shape is not available.")
78
+
79
+
80
def _matvec_builder(A: Any) -> MatVec:
    """
    Wrap an operator-like object A into a plain ``x -> A @ x`` callable.

    Accepts FluxSparseMatrix / anything with a ``matvec`` method, a bare
    callable, a COO 4-tuple, a scipy sparse matrix, or a dense array-like.
    """
    # FluxSparseMatrix and duck-typed operators both expose `matvec`.
    if isinstance(A, FluxSparseMatrix) or hasattr(A, "matvec"):
        return lambda x: np.asarray(A.matvec(x))
    if callable(A):
        return lambda x: np.asarray(A(x))
    # COO tuple: materialize a FluxSparseMatrix and recurse.
    if isinstance(A, tuple) and len(A) == 4:
        return _matvec_builder(FluxSparseMatrix.from_bilinear(A))
    if sp is not None and sp.issparse(A):
        return lambda x: np.asarray(A @ x)

    # Fallback: treat A as a dense matrix (converted on every call, matching
    # the original behavior if A mutates between applications).
    def dense_matvec(x):
        return np.asarray(A) @ x

    return dense_matvec
96
+
97
+
98
+ def _diag_from_coo(rows: ArrayLike, cols: ArrayLike, data: ArrayLike, n_dofs: int) -> np.ndarray:
99
+ r = np.asarray(rows, dtype=np.int64)
100
+ c = np.asarray(cols, dtype=np.int64)
101
+ d = np.asarray(data)
102
+ diag = np.zeros(n_dofs, dtype=d.dtype)
103
+ mask = r == c
104
+ if np.any(mask):
105
+ np.add.at(diag, r[mask], d[mask])
106
+ return diag
107
+
108
+
109
def _diag_from_operator(A: Any, n_dofs: int) -> np.ndarray:
    """
    Extract the main diagonal of A for the diag0 preconditioner.

    Raises ValueError when no diagonal can be recovered from A.
    """
    if isinstance(A, FluxSparseMatrix):
        return np.asarray(A.diag())
    if isinstance(A, tuple) and len(A) == 4:
        r, c, vals, n_hint = A
        size = n_dofs if n_hint is None else int(n_hint)
        return _diag_from_coo(r, c, vals, size)
    if sp is not None and sp.issparse(A):
        return np.asarray(A.diagonal())
    # Duck-typed operators: prefer `diag`, then `diagonal` (same order as before).
    for attr in ("diag", "diagonal"):
        if hasattr(A, attr):
            return np.asarray(getattr(A, attr)())
    dense = np.asarray(A)
    if dense.ndim == 2 and dense.shape[0] == dense.shape[1]:
        return np.diag(dense)
    raise ValueError("diag0 preconditioner requires access to the matrix diagonal.")
126
+
127
+
128
def _as_csr(K: Any) -> tuple[np.ndarray, np.ndarray, np.ndarray, int]:
    """
    Normalize a matrix-like K into ``(indptr, indices, data, n_dofs)`` CSR form.

    Accepts FluxSparseMatrix, a COO 4-tuple, a scipy sparse matrix, an object
    exposing ``to_csr()``, or a square dense array-like.
    """
    if isinstance(K, FluxSparseMatrix):
        r, c, vals, size = K.to_coo()
        ptr, idx, vals = _coo_to_csr(r, c, vals, int(size))
        return ptr, idx, vals, int(size)
    if isinstance(K, tuple) and len(K) == 4:
        r, c, vals, size = K
        ptr, idx, vals = _coo_to_csr(r, c, vals, int(size))
        return ptr, idx, vals, int(size)
    if sp is not None and sp.issparse(K):
        csr = K.tocsr()
        ptr = csr.indptr.astype(np.int32, copy=False)
        idx = csr.indices.astype(np.int32, copy=False)
        return ptr, idx, csr.data, csr.shape[0]
    if hasattr(K, "to_csr"):
        csr = K.to_csr()
        ptr = csr.indptr.astype(np.int32, copy=False)
        idx = csr.indices.astype(np.int32, copy=False)
        return ptr, idx, csr.data, csr.shape[0]
    # Dense fallback: keep only the structural nonzeros.
    dense = np.asarray(K)
    if dense.ndim != 2 or dense.shape[0] != dense.shape[1]:
        raise ValueError("K must be square for PETSc solve.")
    r, c = np.nonzero(dense)
    ptr, idx, vals = _coo_to_csr(r, c, dense[r, c], dense.shape[0])
    return ptr, idx, vals, int(dense.shape[0])
155
+
156
+
157
def petsc_solve(
    K: Any,
    F: Any,
    *,
    ksp_type: str = "preonly",
    pc_type: str = "lu",
    rtol: float | None = None,
    atol: float | None = None,
    max_it: int | None = None,
    options: dict[str, Any] | None = None,
) -> np.ndarray:
    """
    Solve K u = F using PETSc.

    Parameters
    ----------
    K : FluxSparseMatrix | COO tuple | ndarray | scipy.sparse matrix
        Assembled system matrix. COO tuple is (rows, cols, data, n_dofs).
    F : array-like
        RHS vector (n_dofs,) or matrix (n_dofs, n_rhs).
    ksp_type / pc_type : str
        PETSc KSP/PC type, e.g., "cg"/"gamg" or "preonly"/"lu".
    rtol, atol, max_it : optional
        Solver tolerances; PETSc defaults are used for any left as None.
    options : dict
        Extra PETSc options (name -> value).

    Returns
    -------
    np.ndarray
        Solution with the same trailing shape as F: (n_dofs,) or (n_dofs, n_rhs).

    Raises
    ------
    ImportError
        If petsc4py is not installed.
    ValueError
        If F has a shape incompatible with K.
    """
    PETSc = _require_petsc4py()
    indptr, indices, data, n_dofs = _as_csr(K)

    mat = PETSc.Mat().createAIJ(size=(n_dofs, n_dofs), csr=(indptr, indices, np.asarray(data)))
    mat.assemble()

    ksp = PETSc.KSP().create()
    ksp.setOperators(mat)
    if ksp_type:
        ksp.setType(ksp_type)
    if pc_type:
        ksp.getPC().setType(pc_type)
    if rtol is not None or atol is not None or max_it is not None:
        ksp.setTolerances(
            rtol=rtol if rtol is not None else PETSc.DEFAULT,
            atol=atol if atol is not None else PETSc.DEFAULT,
            max_it=max_it if max_it is not None else PETSc.DEFAULT,
        )
    if options:
        opts = PETSc.Options()
        for key, value in options.items():
            opts[str(key)] = str(value)
        ksp.setFromOptions()

    def _solve_single(rhs: np.ndarray) -> np.ndarray:
        # Vec.createWithArray expects a contiguous scalar buffer; column
        # slices of a C-ordered 2-D RHS are strided views, so copy defensively.
        b = PETSc.Vec().createWithArray(np.ascontiguousarray(rhs))
        x = PETSc.Vec().createSeq(n_dofs)
        ksp.solve(b, x)
        return np.asarray(x.getArray(), copy=True)

    F_arr = np.asarray(F)
    if F_arr.ndim == 1:
        if F_arr.shape[0] != n_dofs:
            raise ValueError("F has incompatible size for K.")
        return _solve_single(F_arr)

    if F_arr.ndim == 2:
        if F_arr.shape[0] != n_dofs:
            raise ValueError("F has incompatible size for K.")
        # One solve per RHS column, reusing the factored/setup KSP.
        columns = [_solve_single(F_arr[:, i]) for i in range(F_arr.shape[1])]
        return np.stack(columns, axis=1)

    raise ValueError("F must be a vector or a 2D array.")
227
+
228
+
229
def petsc_shell_solve(
    A: Any,
    F: Any,
    *,
    n_dofs: int | None = None,
    ksp_type: str = "gmres",
    pc_type: str = "none",
    preconditioner: str | None | Callable[[np.ndarray], np.ndarray] = "diag0",
    pmat: Any | None = None,
    rtol: float | None = None,
    atol: float | None = None,
    max_it: int | None = None,
    options: dict[str, Any] | None = None,
    options_prefix: str | None = "fluxfem_",
    return_info: bool = False,
) -> np.ndarray | tuple[np.ndarray, SolveInfo]:
    """
    Solve A x = F using PETSc with a matrix-free Shell Mat.

    Parameters
    ----------
    A : callable | object with matvec | FluxSparseMatrix | tuple | ndarray
        Operator to apply in matvec form.
    F : array-like
        RHS vector (n_dofs,) or matrix (n_dofs, n_rhs).
    n_dofs : int, optional
        System size; inferred from A or F when omitted.
    ksp_type : str
        PETSc KSP type (default "gmres").
    pc_type : str
        PC type used only when no Python preconditioner context is installed
        (i.e., when `preconditioner` is None or diag0 setup fails).
    preconditioner : "diag0" | callable | None
        "diag0" builds a diagonal preconditioner if available.
    pmat : optional
        Assembled matrix used only as the preconditioner operator.
    rtol, atol, max_it : optional
        Solver tolerances; PETSc defaults fill in any left as None.
    options : dict, optional
        Extra PETSc options (name -> value) applied via setFromOptions.
    options_prefix : str, optional
        Option-name prefix targeting this KSP in the PETSc options database.
    return_info : bool
        When True, return a (solution, info) tuple.

    Returns
    -------
    np.ndarray or (np.ndarray, dict)
        Solution (matching F's trailing shape) and, when requested, an info
        dict with iteration counts, residual norms, and timings.

    Raises
    ------
    ImportError
        If petsc4py is not installed.
    ValueError
        If pmat or F has an incompatible size, or `preconditioner` is unknown.
    """
    PETSc = _require_petsc4py()
    n = _infer_n_dofs(A, F, n_dofs)
    matvec = _matvec_builder(A)

    # PETSc Python-context object implementing y = A @ x for the shell Mat.
    class _ShellMatContext:
        def __init__(self, mv):
            self.mv = mv

        def mult(self, mat, x, y):
            x_arr = x.getArray(readonly=True)
            # Cast to the Vec's dtype so setArray accepts the result.
            y_arr = np.asarray(self.mv(x_arr), dtype=x_arr.dtype)
            y.setArray(y_arr)

    mat_ctx = _ShellMatContext(matvec)
    mat = PETSc.Mat().createPython([n, n])
    mat.setPythonContext(mat_ctx)
    mat.setUp()

    # Optionally assemble an AIJ matrix used only as the preconditioner
    # operator (second argument of setOperators); timed for diagnostics.
    pmat_aij = None
    pmat_build_time = None
    if pmat is not None:
        t_pmat = time.perf_counter()
        indptr, indices, data, n_p = _as_csr(pmat)
        if n_p != n:
            raise ValueError("pmat has incompatible size for A.")
        pmat_aij = PETSc.Mat().createAIJ(size=(n, n), csr=(indptr, indices, np.asarray(data)))
        pmat_aij.assemble()
        pmat_build_time = time.perf_counter() - t_pmat

    ksp = PETSc.KSP().create()
    if pmat_aij is None:
        ksp.setOperators(mat)
    else:
        ksp.setOperators(mat, pmat_aij)
    if options_prefix:
        ksp.setOptionsPrefix(options_prefix)

    if ksp_type:
        ksp.setType(ksp_type)
    if rtol is not None or atol is not None or max_it is not None:
        ksp.setTolerances(
            rtol=rtol if rtol is not None else PETSc.DEFAULT,
            atol=atol if atol is not None else PETSc.DEFAULT,
            max_it=max_it if max_it is not None else PETSc.DEFAULT,
        )
    if options or options_prefix:
        if options:
            opts = PETSc.Options()
            for key, value in options.items():
                opts[str(key)] = str(value)
        ksp.setFromOptions()
        # NOTE(review): type/tolerances are re-applied after setFromOptions,
        # presumably so explicit keyword arguments win over options-database
        # entries — confirm this precedence is intended.
        if ksp_type:
            ksp.setType(ksp_type)
        if rtol is not None or atol is not None or max_it is not None:
            ksp.setTolerances(
                rtol=rtol if rtol is not None else PETSc.DEFAULT,
                atol=atol if atol is not None else PETSc.DEFAULT,
                max_it=max_it if max_it is not None else PETSc.DEFAULT,
            )

    pc = ksp.getPC()
    if preconditioner is None:
        if pc_type:
            pc.setType(pc_type)
    else:
        if preconditioner == "diag0":
            # Prefer the assembled pmat's diagonal; fall back to A itself.
            diag_source = pmat if pmat is not None else A
            try:
                diag = _diag_from_operator(diag_source, n)
            except Exception as exc:
                warnings.warn(
                    f"diag0 preconditioner unavailable ({exc}); falling back to no preconditioner.",
                    RuntimeWarning,
                )
                preconditioner = None
                diag = None

            if diag is not None:
                # Jacobi: invert nonzero diagonal entries; zeros map to zero.
                inv_diag = np.where(diag != 0.0, 1.0 / diag, 0.0)

                class _DiagPCContext:
                    def __init__(self, inv):
                        self.inv = inv

                    def apply(self, pc, x, y):
                        x_arr = x.getArray(readonly=True)
                        y.setArray(self.inv * x_arr)

                pc.setType(PETSc.PC.Type.PYTHON)
                pc.setPythonContext(_DiagPCContext(inv_diag))
            elif pc_type and pc_type not in ("none", "NONE"):
                # diag0 failed and the requested pc_type assumed a diagonal.
                warnings.warn(
                    f"pc_type='{pc_type}' requires a usable diagonal; falling back to pc_type='none'.",
                    RuntimeWarning,
                )
                pc.setType("none")
            elif pc_type:
                pc.setType(pc_type)
        elif callable(preconditioner):

            # User-supplied preconditioner: y = M^{-1} x via a Python PC.
            class _CallablePCContext:
                def __init__(self, fn):
                    self.fn = fn

                def apply(self, pc, x, y):
                    x_arr = x.getArray(readonly=True)
                    y_arr = np.asarray(self.fn(x_arr), dtype=x_arr.dtype)
                    y.setArray(y_arr)

            pc.setType(PETSc.PC.Type.PYTHON)
            pc.setPythonContext(_CallablePCContext(preconditioner))
        else:
            raise ValueError(f"Unknown preconditioner: {preconditioner}")

    # Snapshot KSP convergence state plus timings into a plain dict; every
    # query is wrapped so a failed getter degrades to None instead of raising.
    def _ksp_info(solve_time=None, pc_setup_time=None):
        try:
            reason = ksp.getConvergedReason()
        except Exception:  # pragma: no cover - defensive
            reason = None
        try:
            iters = ksp.getIterationNumber()
        except Exception:  # pragma: no cover - defensive
            iters = None
        try:
            res = ksp.getResidualNorm()
        except Exception:  # pragma: no cover - defensive
            res = None
        converged = None
        if reason is not None:
            # PETSc convention: positive reason codes mean convergence.
            converged = reason > 0
        return {
            "iters": iters,
            "residual_norm": res,
            "converged": converged,
            "reason": reason,
            "pmat_build_time": pmat_build_time,
            "pc_setup_time": pc_setup_time,
            "solve_time": solve_time,
        }

    F_arr = np.asarray(F)
    if F_arr.ndim == 1:
        if F_arr.shape[0] != n:
            raise ValueError("F has incompatible size for A.")
        b = PETSc.Vec().createWithArray(F_arr)
        x = PETSc.Vec().createSeq(n)
        t_setup = time.perf_counter()
        ksp.setUp()
        pc_setup_time = time.perf_counter() - t_setup
        t_solve = time.perf_counter()
        ksp.solve(b, x)
        solve_time = time.perf_counter() - t_solve
        x_out = np.asarray(x.getArray(), copy=True)
        if return_info:
            return x_out, _ksp_info(solve_time=solve_time, pc_setup_time=pc_setup_time)
        return x_out

    if F_arr.ndim == 2:
        if F_arr.shape[0] != n:
            raise ValueError("F has incompatible size for A.")
        out = []
        infos = []
        # setUp once; the same KSP/PC is reused for every RHS column.
        t_setup = time.perf_counter()
        ksp.setUp()
        pc_setup_time = time.perf_counter() - t_setup
        for i in range(F_arr.shape[1]):
            b = PETSc.Vec().createWithArray(F_arr[:, i])
            x = PETSc.Vec().createSeq(n)
            t_solve = time.perf_counter()
            ksp.solve(b, x)
            solve_time = time.perf_counter() - t_solve
            out.append(np.asarray(x.getArray(), copy=True))
            infos.append(_ksp_info(solve_time=solve_time, pc_setup_time=pc_setup_time))
        x_out = np.stack(out, axis=1)
        if return_info:
            # NOTE(review): unlike the single-RHS path, this per-column info
            # dict omits the timing fields — confirm whether that is intended.
            info = {
                "iters": [i.get("iters") for i in infos],
                "residual_norm": np.asarray([i.get("residual_norm") for i in infos]),
                "converged": [i.get("converged") for i in infos],
                "reason": [i.get("reason") for i in infos],
            }
            return x_out, info
        return x_out

    raise ValueError("F must be a vector or a 2D array.")
@@ -0,0 +1,109 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Callable, Iterable, Sequence, TypeAlias
4
+
5
+ import numpy as np
6
+ import jax.numpy as jnp
7
+
8
+ try:
9
+ from jax.experimental import sparse as jsparse
10
+ except Exception: # pragma: no cover
11
+ jsparse = None
12
+
13
+ from .sparse import FluxSparseMatrix
14
+
15
+ ArrayLike: TypeAlias = jnp.ndarray
16
+ Preconditioner: TypeAlias = Callable[[jnp.ndarray], jnp.ndarray]
17
+
18
+
19
+ def _extract_block_sizes(
20
+ n: int,
21
+ *,
22
+ dof_per_node: int | None,
23
+ block_sizes: Sequence[int] | None,
24
+ meta: dict | None,
25
+ ) -> int:
26
+ sizes = None
27
+ if block_sizes is not None:
28
+ sizes = np.asarray(block_sizes, dtype=int)
29
+ elif dof_per_node is not None:
30
+ if dof_per_node <= 0:
31
+ raise ValueError("dof_per_node must be positive")
32
+ if n % dof_per_node != 0:
33
+ raise ValueError("dof_per_node must divide n_dofs")
34
+ sizes = np.full(n // dof_per_node, dof_per_node, dtype=int)
35
+ elif meta:
36
+ if meta.get("dof_layout") not in (None, "blocked"):
37
+ raise ValueError("block_jacobi requires dof_layout='blocked'")
38
+ if "block_sizes" in meta:
39
+ sizes = np.asarray(meta["block_sizes"], dtype=int)
40
+ elif "dof_per_node" in meta:
41
+ d = int(meta["dof_per_node"])
42
+ if d <= 0 or n % d != 0:
43
+ raise ValueError("meta['dof_per_node'] must divide n_dofs")
44
+ sizes = np.full(n // d, d, dtype=int)
45
+ if sizes is None or sizes.size == 0:
46
+ raise ValueError("block_jacobi requires block_sizes or dof_per_node")
47
+ if np.any(sizes <= 0):
48
+ raise ValueError("block_sizes entries must be positive")
49
+ if int(sizes.sum()) != n:
50
+ raise ValueError("sum(block_sizes) must equal n_dofs")
51
+ if not np.all(sizes == sizes[0]):
52
+ raise ValueError("block_sizes must be uniform for block_jacobi")
53
+ return int(sizes[0])
54
+
55
+
56
def make_block_jacobi_preconditioner(
    A: FluxSparseMatrix | "jsparse.BCOO",
    *,
    dof_per_node: int | None = None,
    block_sizes: Sequence[int] | None = None,
) -> Preconditioner:
    """
    Build a block-Jacobi preconditioner for blocked DOF layouts.

    Priority:
        block_sizes -> dof_per_node -> A.meta (block_sizes / dof_per_node) -> error

    Returns a callable z = P^{-1} r that applies the inverted diagonal blocks.
    """
    # Pull COO coordinates/values and the uniform block size from either
    # a JAX BCOO matrix or a FluxSparseMatrix.
    if jsparse is not None and isinstance(A, jsparse.BCOO):
        n = int(A.shape[0])
        bs = _extract_block_sizes(
            n, dof_per_node=dof_per_node, block_sizes=block_sizes, meta=None
        )
        row_idx = jnp.asarray(A.indices[:, 0])
        col_idx = jnp.asarray(A.indices[:, 1])
        vals = jnp.asarray(A.data)
    elif isinstance(A, FluxSparseMatrix):
        n = int(A.n_dofs)
        bs = _extract_block_sizes(
            n, dof_per_node=dof_per_node, block_sizes=block_sizes, meta=A.meta
        )
        row_idx = jnp.asarray(A.pattern.rows)
        col_idx = jnp.asarray(A.pattern.cols)
        vals = jnp.asarray(A.data)
    else:
        raise ValueError("block_jacobi requires FluxSparseMatrix or BCOO")

    if n % bs != 0:
        raise ValueError("block_size must divide n_dofs")
    n_block = n // bs

    # Keep only entries whose row and column land in the same diagonal block.
    brow = row_idx // bs
    keep = brow == (col_idx // bs)
    brow = brow[keep]
    local_r = (row_idx % bs)[keep]
    local_c = (col_idx % bs)[keep]
    vals = vals[keep]

    # Scatter-add the surviving entries into dense (n_block, bs, bs) blocks.
    blocks = jnp.zeros((n_block, bs, bs), dtype=vals.dtype)
    blocks = blocks.at[brow, local_r, local_c].add(vals)
    # Tiny diagonal shift guards against exactly singular blocks.
    blocks = blocks + 1e-12 * jnp.eye(bs)[None, :, :]
    inv_blocks = jnp.linalg.inv(blocks)

    def precon(r: jnp.ndarray) -> jnp.ndarray:
        """Apply the inverted blocks to r, block by block."""
        blocked = r.reshape((n_block, bs))
        applied = jnp.einsum("bij,bj->bi", inv_blocks, blocked)
        return applied.reshape((-1,))

    return precon
fluxfem/solver/result.py CHANGED
@@ -21,6 +21,12 @@ class SolverResult:
21
21
  linear_iters: Optional[int] = None
22
22
  linear_converged: Optional[bool] = None
23
23
  linear_residual: Optional[float] = None
24
+ linear_solve_time: Optional[float] = None
25
+ pc_setup_time: Optional[float] = None
26
+ pmat_build_time: Optional[float] = None
27
+ pmat_rebuilds: Optional[int] = None
28
+ pmat_mode: Optional[str] = None
29
+ linear_fallbacks: Optional[List[str]] = None
24
30
 
25
31
  tol: Optional[float] = None
26
32
  atol: Optional[float] = None
@@ -55,6 +61,18 @@ class SolverResult:
55
61
  parts.append(f"lin_conv={self.linear_converged}")
56
62
  if self.linear_residual is not None:
57
63
  parts.append(f"lin_res={self.linear_residual:.3e}")
64
+ if self.linear_solve_time is not None:
65
+ parts.append(f"lin_solve_dt={self.linear_solve_time:.3e}s")
66
+ if self.pc_setup_time is not None:
67
+ parts.append(f"pc_setup_dt={self.pc_setup_time:.3e}s")
68
+ if self.pmat_build_time is not None:
69
+ parts.append(f"pmat_dt={self.pmat_build_time:.3e}s")
70
+ if self.pmat_rebuilds is not None:
71
+ parts.append(f"pmat_rebuilds={self.pmat_rebuilds}")
72
+ if self.pmat_mode is not None:
73
+ parts.append(f"pmat_mode={self.pmat_mode}")
74
+ if self.linear_fallbacks:
75
+ parts.append(f"lin_fallbacks={self.linear_fallbacks}")
58
76
  if self.stop_reason:
59
77
  parts.append(f"reason={self.stop_reason}")
60
78
  if self.nan_detected: