fluxfem 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. fluxfem/__init__.py +68 -0
  2. fluxfem/core/__init__.py +115 -10
  3. fluxfem/core/assembly.py +676 -91
  4. fluxfem/core/basis.py +73 -52
  5. fluxfem/core/dtypes.py +9 -1
  6. fluxfem/core/forms.py +10 -0
  7. fluxfem/core/mixed_assembly.py +263 -0
  8. fluxfem/core/mixed_space.py +348 -0
  9. fluxfem/core/mixed_weakform.py +97 -0
  10. fluxfem/core/solver.py +2 -0
  11. fluxfem/core/space.py +262 -17
  12. fluxfem/core/weakform.py +768 -7
  13. fluxfem/helpers_wf.py +49 -0
  14. fluxfem/mesh/__init__.py +54 -2
  15. fluxfem/mesh/base.py +316 -7
  16. fluxfem/mesh/contact.py +825 -0
  17. fluxfem/mesh/dtypes.py +12 -0
  18. fluxfem/mesh/hex.py +17 -16
  19. fluxfem/mesh/io.py +6 -4
  20. fluxfem/mesh/mortar.py +3907 -0
  21. fluxfem/mesh/supermesh.py +316 -0
  22. fluxfem/mesh/surface.py +22 -4
  23. fluxfem/mesh/tet.py +10 -4
  24. fluxfem/physics/diffusion.py +3 -0
  25. fluxfem/physics/elasticity/hyperelastic.py +3 -0
  26. fluxfem/physics/elasticity/linear.py +9 -2
  27. fluxfem/solver/__init__.py +42 -2
  28. fluxfem/solver/bc.py +38 -2
  29. fluxfem/solver/block_matrix.py +132 -0
  30. fluxfem/solver/block_system.py +454 -0
  31. fluxfem/solver/cg.py +115 -33
  32. fluxfem/solver/dirichlet.py +334 -4
  33. fluxfem/solver/newton.py +237 -60
  34. fluxfem/solver/petsc.py +439 -0
  35. fluxfem/solver/preconditioner.py +106 -0
  36. fluxfem/solver/result.py +18 -0
  37. fluxfem/solver/solve_runner.py +168 -1
  38. fluxfem/solver/solver.py +12 -1
  39. fluxfem/solver/sparse.py +124 -9
  40. fluxfem-0.2.0.dist-info/METADATA +303 -0
  41. fluxfem-0.2.0.dist-info/RECORD +59 -0
  42. fluxfem-0.1.4.dist-info/METADATA +0 -127
  43. fluxfem-0.1.4.dist-info/RECORD +0 -48
  44. {fluxfem-0.1.4.dist-info → fluxfem-0.2.0.dist-info}/LICENSE +0 -0
  45. {fluxfem-0.1.4.dist-info → fluxfem-0.2.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,439 @@
1
+ from __future__ import annotations
2
+
3
+ import numpy as np
4
+ import time
5
+ import warnings
6
+ from typing import Any, Callable
7
+
8
+ try:
9
+ import scipy.sparse as sp
10
+ except Exception: # pragma: no cover
11
+ sp = None
12
+
13
+ from .sparse import FluxSparseMatrix
14
+
15
+
16
def petsc_is_available() -> bool:
    """Report whether ``petsc4py`` can be imported in this environment."""
    try:
        import petsc4py  # noqa: F401
    except Exception:
        return False
    return True
22
+
23
+
24
def _require_petsc4py():
    """Import and initialize petsc4py, returning the ``PETSc`` module.

    Raises
    ------
    ImportError
        If petsc4py cannot be imported or initialized.
    """
    try:
        import petsc4py

        petsc4py.init([])
        from petsc4py import PETSc
    except Exception as exc:  # pragma: no cover
        raise ImportError(
            "petsc4py is required for PETSc solves. Install with the petsc extra."
        ) from exc
    return PETSc
32
+
33
+
34
+ def _coo_to_csr(rows, cols, data, n_dofs: int):
35
+ r = np.asarray(rows, dtype=np.int64)
36
+ c = np.asarray(cols, dtype=np.int64)
37
+ d = np.asarray(data)
38
+ if r.size == 0:
39
+ indptr = np.zeros(n_dofs + 1, dtype=np.int32)
40
+ indices = np.zeros(0, dtype=np.int32)
41
+ return indptr, indices, d
42
+ order = np.lexsort((c, r))
43
+ r_s = r[order]
44
+ c_s = c[order]
45
+ d_s = d[order]
46
+ new_group = np.ones(r_s.size, dtype=bool)
47
+ new_group[1:] = (r_s[1:] != r_s[:-1]) | (c_s[1:] != c_s[:-1])
48
+ starts = np.nonzero(new_group)[0]
49
+ r_u = r_s[starts]
50
+ c_u = c_s[starts]
51
+ d_u = np.add.reduceat(d_s, starts)
52
+ indptr = np.zeros(n_dofs + 1, dtype=np.int32)
53
+ np.add.at(indptr, r_u + 1, 1)
54
+ indptr = np.cumsum(indptr, dtype=np.int32)
55
+ return indptr, c_u.astype(np.int32), d_u
56
+
57
+
58
+ def _infer_n_dofs(K: Any, F: Any | None, n_dofs: int | None) -> int:
59
+ if n_dofs is not None:
60
+ return int(n_dofs)
61
+ if hasattr(K, "n_dofs"):
62
+ return int(getattr(K, "n_dofs"))
63
+ if hasattr(K, "shape"):
64
+ shape = getattr(K, "shape")
65
+ if shape is not None:
66
+ return int(shape[0])
67
+ if F is not None:
68
+ F_arr = np.asarray(F)
69
+ if F_arr.ndim >= 1:
70
+ return int(F_arr.shape[0])
71
+ raise ValueError("n_dofs is required when operator shape is not available.")
72
+
73
+
74
def _matvec_builder(A: Any):
    """Return a closure computing ``x -> A @ x`` for any supported operator form.

    Accepts FluxSparseMatrix / objects exposing ``matvec``, callables,
    4-tuples (COO bilinear form), scipy sparse matrices, and dense arrays.
    """
    # FluxSparseMatrix and generic matvec-bearing objects share the same path.
    if isinstance(A, FluxSparseMatrix) or hasattr(A, "matvec"):
        return lambda x: np.asarray(A.matvec(x))
    if callable(A):
        return lambda x: np.asarray(A(x))
    if isinstance(A, tuple) and len(A) == 4:
        # COO tuple: materialize as a sparse matrix first, then recurse.
        return _matvec_builder(FluxSparseMatrix.from_bilinear(A))
    if sp is not None and sp.issparse(A):
        return lambda x: np.asarray(A @ x)

    def apply_dense(x):
        # Convert lazily on each call so later mutation of A is observed,
        # matching the original closure semantics.
        return np.asarray(A) @ x

    return apply_dense
90
+
91
+
92
+ def _diag_from_coo(rows, cols, data, n_dofs: int) -> np.ndarray:
93
+ r = np.asarray(rows, dtype=np.int64)
94
+ c = np.asarray(cols, dtype=np.int64)
95
+ d = np.asarray(data)
96
+ diag = np.zeros(n_dofs, dtype=d.dtype)
97
+ mask = r == c
98
+ if np.any(mask):
99
+ np.add.at(diag, r[mask], d[mask])
100
+ return diag
101
+
102
+
103
def _diag_from_operator(A: Any, n_dofs: int) -> np.ndarray:
    """Extract the main diagonal of ``A`` as a dense ndarray.

    Supports FluxSparseMatrix, COO 4-tuples, scipy sparse matrices, objects
    exposing ``diag()`` or ``diagonal()``, and square dense arrays.

    Raises
    ------
    ValueError
        If no diagonal can be obtained from ``A``.
    """
    if isinstance(A, FluxSparseMatrix):
        return np.asarray(A.diag())
    if isinstance(A, tuple) and len(A) == 4:
        rows, cols, data, n_from_tuple = A
        # Fall back to the caller-supplied size when the tuple carries None.
        effective_n = n_dofs if n_from_tuple is None else int(n_from_tuple)
        return _diag_from_coo(rows, cols, data, effective_n)
    if sp is not None and sp.issparse(A):
        return np.asarray(A.diagonal())
    for attr in ("diag", "diagonal"):
        if hasattr(A, attr):
            return np.asarray(getattr(A, attr)())
    dense = np.asarray(A)
    if dense.ndim == 2 and dense.shape[0] == dense.shape[1]:
        return np.diag(dense)
    raise ValueError("diag0 preconditioner requires access to the matrix diagonal.")
120
+
121
+
122
def _as_csr(K: Any):
    """Normalize ``K`` into ``(indptr, indices, data, n_dofs)`` CSR components.

    Accepts FluxSparseMatrix, COO 4-tuples, scipy sparse matrices, objects
    exposing ``to_csr()``, and square dense arrays.

    Raises
    ------
    ValueError
        If a dense ``K`` is not a square 2D array.
    """
    # FluxSparseMatrix and raw COO tuples share the COO -> CSR path.
    coo = None
    if isinstance(K, FluxSparseMatrix):
        coo = K.to_coo()
    elif isinstance(K, tuple) and len(K) == 4:
        coo = K
    if coo is not None:
        rows, cols, data, n_dofs = coo
        indptr, indices, data = _coo_to_csr(rows, cols, data, int(n_dofs))
        return indptr, indices, data, int(n_dofs)
    if sp is not None and sp.issparse(K):
        csr = K.tocsr()
        return (
            csr.indptr.astype(np.int32, copy=False),
            csr.indices.astype(np.int32, copy=False),
            csr.data,
            csr.shape[0],
        )
    if hasattr(K, "to_csr"):
        csr = K.to_csr()
        return (
            csr.indptr.astype(np.int32, copy=False),
            csr.indices.astype(np.int32, copy=False),
            csr.data,
            csr.shape[0],
        )
    dense = np.asarray(K)
    if dense.ndim != 2 or dense.shape[0] != dense.shape[1]:
        raise ValueError("K must be square for PETSc solve.")
    nz_rows, nz_cols = np.nonzero(dense)
    indptr, indices, data = _coo_to_csr(
        nz_rows, nz_cols, dense[nz_rows, nz_cols], dense.shape[0]
    )
    return indptr, indices, data, int(dense.shape[0])
149
+
150
+
151
def petsc_solve(
    K: Any,
    F: Any,
    *,
    ksp_type: str = "preonly",
    pc_type: str = "lu",
    rtol: float | None = None,
    atol: float | None = None,
    max_it: int | None = None,
    options: dict[str, Any] | None = None,
) -> np.ndarray:
    """
    Solve K u = F using PETSc.

    Parameters
    ----------
    K : FluxSparseMatrix | COO tuple | ndarray | scipy.sparse matrix
        Assembled system matrix. COO tuple is (rows, cols, data, n_dofs).
    F : array-like
        RHS vector (n_dofs,) or matrix (n_dofs, n_rhs).
    ksp_type / pc_type : str
        PETSc KSP/PC type, e.g., "cg"/"gamg" or "preonly"/"lu".
    options : dict
        Extra PETSc options (name -> value).

    Returns
    -------
    np.ndarray
        Solution vector of shape (n_dofs,) for a 1D F, or (n_dofs, n_rhs)
        for a 2D F.

    Raises
    ------
    ImportError
        If petsc4py is not installed (via _require_petsc4py).
    ValueError
        If F has an incompatible size or more than 2 dimensions.
    """
    PETSc = _require_petsc4py()
    # Normalize K to CSR; this also sums duplicate COO entries.
    indptr, indices, data, n_dofs = _as_csr(K)

    mat = PETSc.Mat().createAIJ(size=(n_dofs, n_dofs), csr=(indptr, indices, np.asarray(data)))
    mat.assemble()

    ksp = PETSc.KSP().create()
    ksp.setOperators(mat)
    if ksp_type:
        ksp.setType(ksp_type)
    if pc_type:
        ksp.getPC().setType(pc_type)
    if rtol is not None or atol is not None or max_it is not None:
        # Unspecified tolerances keep PETSc defaults.
        ksp.setTolerances(
            rtol=rtol if rtol is not None else PETSc.DEFAULT,
            atol=atol if atol is not None else PETSc.DEFAULT,
            max_it=max_it if max_it is not None else PETSc.DEFAULT,
        )
    if options:
        # Push extra options into the PETSc options database, then apply them.
        opts = PETSc.Options()
        for key, value in options.items():
            opts[str(key)] = str(value)
        ksp.setFromOptions()

    F_arr = np.asarray(F)
    if F_arr.ndim == 1:
        if F_arr.shape[0] != n_dofs:
            raise ValueError("F has incompatible size for K.")
        b = PETSc.Vec().createWithArray(F_arr)
        x = PETSc.Vec().createSeq(n_dofs)
        ksp.solve(b, x)
        # Copy: the PETSc vector's backing storage is freed with the Vec.
        return np.asarray(x.getArray(), copy=True)

    if F_arr.ndim == 2:
        if F_arr.shape[0] != n_dofs:
            raise ValueError("F has incompatible size for K.")
        # Multiple RHS: reuse the factorization/KSP, solve column by column.
        out = []
        for i in range(F_arr.shape[1]):
            b = PETSc.Vec().createWithArray(F_arr[:, i])
            x = PETSc.Vec().createSeq(n_dofs)
            ksp.solve(b, x)
            out.append(np.asarray(x.getArray(), copy=True))
        return np.stack(out, axis=1)

    raise ValueError("F must be a vector or a 2D array.")
221
+
222
+
223
def petsc_shell_solve(
    A: Any,
    F: Any,
    *,
    n_dofs: int | None = None,
    ksp_type: str = "gmres",
    pc_type: str = "none",
    preconditioner: str | None | Callable[[np.ndarray], np.ndarray] = "diag0",
    pmat: Any | None = None,
    rtol: float | None = None,
    atol: float | None = None,
    max_it: int | None = None,
    options: dict[str, Any] | None = None,
    options_prefix: str | None = "fluxfem_",
    return_info: bool = False,
) -> np.ndarray | tuple[np.ndarray, dict[str, Any]]:
    """
    Solve A x = F using PETSc with a matrix-free Shell Mat.

    Parameters
    ----------
    A : callable | object with matvec | FluxSparseMatrix | tuple | ndarray
        Operator to apply in matvec form.
    F : array-like
        RHS vector (n_dofs,) or matrix (n_dofs, n_rhs).
    preconditioner : "diag0" | callable | None
        "diag0" builds a diagonal preconditioner if available.
    pmat : optional
        Assembled matrix used only as the preconditioner operator.
    return_info : bool
        When True, return a (solution, info) tuple.

    Returns
    -------
    np.ndarray, or (np.ndarray, dict) when return_info=True. For a 2D F the
    info dict holds per-column lists (iters/residual_norm/converged/reason).

    Raises
    ------
    ImportError
        If petsc4py is not installed.
    ValueError
        If sizes are incompatible, F has more than 2 dimensions, or the
        preconditioner argument is unrecognized.
    """
    PETSc = _require_petsc4py()
    n = _infer_n_dofs(A, F, n_dofs)
    matvec = _matvec_builder(A)

    # Python context that forwards PETSc's mult() to the user matvec.
    class _ShellMatContext:
        def __init__(self, mv):
            self.mv = mv

        def mult(self, mat, x, y):
            x_arr = x.getArray(readonly=True)
            y_arr = np.asarray(self.mv(x_arr), dtype=x_arr.dtype)
            y.setArray(y_arr)

    mat_ctx = _ShellMatContext(matvec)
    mat = PETSc.Mat().createPython([n, n])
    mat.setPythonContext(mat_ctx)
    mat.setUp()

    # Optionally assemble an explicit AIJ matrix used only for preconditioning.
    pmat_aij = None
    pmat_build_time = None
    if pmat is not None:
        t_pmat = time.perf_counter()
        indptr, indices, data, n_p = _as_csr(pmat)
        if n_p != n:
            raise ValueError("pmat has incompatible size for A.")
        pmat_aij = PETSc.Mat().createAIJ(size=(n, n), csr=(indptr, indices, np.asarray(data)))
        pmat_aij.assemble()
        pmat_build_time = time.perf_counter() - t_pmat

    ksp = PETSc.KSP().create()
    if pmat_aij is None:
        ksp.setOperators(mat)
    else:
        # Second operator = preconditioning matrix (Amat/Pmat split).
        ksp.setOperators(mat, pmat_aij)
    if options_prefix:
        ksp.setOptionsPrefix(options_prefix)

    if ksp_type:
        ksp.setType(ksp_type)
    if rtol is not None or atol is not None or max_it is not None:
        ksp.setTolerances(
            rtol=rtol if rtol is not None else PETSc.DEFAULT,
            atol=atol if atol is not None else PETSc.DEFAULT,
            max_it=max_it if max_it is not None else PETSc.DEFAULT,
        )
    if options or options_prefix:
        if options:
            opts = PETSc.Options()
            for key, value in options.items():
                opts[str(key)] = str(value)
        ksp.setFromOptions()
        # setType/setTolerances are applied again after setFromOptions —
        # NOTE(review): presumably so explicit keyword arguments take
        # precedence over the options database; confirm this is intended.
        if ksp_type:
            ksp.setType(ksp_type)
        if rtol is not None or atol is not None or max_it is not None:
            ksp.setTolerances(
                rtol=rtol if rtol is not None else PETSc.DEFAULT,
                atol=atol if atol is not None else PETSc.DEFAULT,
                max_it=max_it if max_it is not None else PETSc.DEFAULT,
            )

    pc = ksp.getPC()
    if preconditioner is None:
        if pc_type:
            pc.setType(pc_type)
    else:
        if preconditioner == "diag0":
            # Prefer the assembled pmat for the diagonal; fall back to A.
            diag_source = pmat if pmat is not None else A
            try:
                diag = _diag_from_operator(diag_source, n)
            except Exception as exc:
                warnings.warn(
                    f"diag0 preconditioner unavailable ({exc}); falling back to no preconditioner.",
                    RuntimeWarning,
                )
                preconditioner = None
                diag = None

            if diag is not None:
                # Jacobi-style inverse diagonal; exact zeros map to zero
                # (i.e. those rows are left unpreconditioned).
                inv_diag = np.where(diag != 0.0, 1.0 / diag, 0.0)

                class _DiagPCContext:
                    def __init__(self, inv):
                        self.inv = inv

                    def apply(self, pc, x, y):
                        x_arr = x.getArray(readonly=True)
                        y.setArray(self.inv * x_arr)

                pc.setType(PETSc.PC.Type.PYTHON)
                pc.setPythonContext(_DiagPCContext(inv_diag))
            elif pc_type and pc_type not in ("none", "NONE"):
                warnings.warn(
                    f"pc_type='{pc_type}' requires a usable diagonal; falling back to pc_type='none'.",
                    RuntimeWarning,
                )
                pc.setType("none")
            elif pc_type:
                pc.setType(pc_type)
        elif callable(preconditioner):

            # Wrap a user-supplied r -> M^{-1} r callable as a PETSc PC.
            class _CallablePCContext:
                def __init__(self, fn):
                    self.fn = fn

                def apply(self, pc, x, y):
                    x_arr = x.getArray(readonly=True)
                    y_arr = np.asarray(self.fn(x_arr), dtype=x_arr.dtype)
                    y.setArray(y_arr)

            pc.setType(PETSc.PC.Type.PYTHON)
            pc.setPythonContext(_CallablePCContext(preconditioner))
        else:
            raise ValueError(f"Unknown preconditioner: {preconditioner}")

    # Collect solver diagnostics defensively (each query may be unsupported).
    def _ksp_info(solve_time=None, pc_setup_time=None):
        try:
            reason = ksp.getConvergedReason()
        except Exception:  # pragma: no cover - defensive
            reason = None
        try:
            iters = ksp.getIterationNumber()
        except Exception:  # pragma: no cover - defensive
            iters = None
        try:
            res = ksp.getResidualNorm()
        except Exception:  # pragma: no cover - defensive
            res = None
        converged = None
        if reason is not None:
            # PETSc convention: positive reason codes mean converged.
            converged = reason > 0
        return {
            "iters": iters,
            "residual_norm": res,
            "converged": converged,
            "reason": reason,
            "pmat_build_time": pmat_build_time,
            "pc_setup_time": pc_setup_time,
            "solve_time": solve_time,
        }

    F_arr = np.asarray(F)
    if F_arr.ndim == 1:
        if F_arr.shape[0] != n:
            raise ValueError("F has incompatible size for A.")
        b = PETSc.Vec().createWithArray(F_arr)
        x = PETSc.Vec().createSeq(n)
        t_setup = time.perf_counter()
        ksp.setUp()
        pc_setup_time = time.perf_counter() - t_setup
        t_solve = time.perf_counter()
        ksp.solve(b, x)
        solve_time = time.perf_counter() - t_solve
        # Copy: the PETSc vector's backing storage is freed with the Vec.
        x_out = np.asarray(x.getArray(), copy=True)
        if return_info:
            return x_out, _ksp_info(solve_time=solve_time, pc_setup_time=pc_setup_time)
        return x_out

    if F_arr.ndim == 2:
        if F_arr.shape[0] != n:
            raise ValueError("F has incompatible size for A.")
        out = []
        infos = []
        t_setup = time.perf_counter()
        ksp.setUp()
        pc_setup_time = time.perf_counter() - t_setup
        for i in range(F_arr.shape[1]):
            b = PETSc.Vec().createWithArray(F_arr[:, i])
            x = PETSc.Vec().createSeq(n)
            t_solve = time.perf_counter()
            ksp.solve(b, x)
            solve_time = time.perf_counter() - t_solve
            out.append(np.asarray(x.getArray(), copy=True))
            infos.append(_ksp_info(solve_time=solve_time, pc_setup_time=pc_setup_time))
        x_out = np.stack(out, axis=1)
        if return_info:
            # NOTE(review): unlike the 1D path, the aggregated info drops the
            # timing fields (solve_time/pc_setup_time/pmat_build_time) —
            # confirm whether that asymmetry is intentional.
            info = {
                "iters": [i.get("iters") for i in infos],
                "residual_norm": np.asarray([i.get("residual_norm") for i in infos]),
                "converged": [i.get("converged") for i in infos],
                "reason": [i.get("reason") for i in infos],
            }
            return x_out, info
        return x_out

    raise ValueError("F must be a vector or a 2D array.")
@@ -0,0 +1,106 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Iterable, Sequence
4
+
5
+ import numpy as np
6
+ import jax.numpy as jnp
7
+
8
+ try:
9
+ from jax.experimental import sparse as jsparse
10
+ except Exception: # pragma: no cover
11
+ jsparse = None
12
+
13
+ from .sparse import FluxSparseMatrix
14
+
15
+
16
def _extract_block_sizes(
    n: int,
    *,
    dof_per_node: int | None,
    block_sizes: Sequence[int] | None,
    meta: dict | None,
) -> int:
    """Resolve and validate a uniform block size for ``n`` total DOFs.

    Resolution priority: explicit block_sizes, then dof_per_node, then the
    metadata dict. Returns the single uniform block size; raises ValueError
    on any inconsistency.
    """
    resolved = None
    if block_sizes is not None:
        resolved = np.asarray(block_sizes, dtype=int)
    elif dof_per_node is not None:
        if dof_per_node <= 0:
            raise ValueError("dof_per_node must be positive")
        if n % dof_per_node != 0:
            raise ValueError("dof_per_node must divide n_dofs")
        resolved = np.full(n // dof_per_node, dof_per_node, dtype=int)
    elif meta:
        # Only blocked (node-interleaved) DOF layouts are supported here.
        if meta.get("dof_layout") not in (None, "blocked"):
            raise ValueError("block_jacobi requires dof_layout='blocked'")
        if "block_sizes" in meta:
            resolved = np.asarray(meta["block_sizes"], dtype=int)
        elif "dof_per_node" in meta:
            per_node = int(meta["dof_per_node"])
            if per_node <= 0 or n % per_node != 0:
                raise ValueError("meta['dof_per_node'] must divide n_dofs")
            resolved = np.full(n // per_node, per_node, dtype=int)
    if resolved is None or resolved.size == 0:
        raise ValueError("block_jacobi requires block_sizes or dof_per_node")
    if np.any(resolved <= 0):
        raise ValueError("block_sizes entries must be positive")
    if int(resolved.sum()) != n:
        raise ValueError("sum(block_sizes) must equal n_dofs")
    if not np.all(resolved == resolved[0]):
        raise ValueError("block_sizes must be uniform for block_jacobi")
    return int(resolved[0])


def make_block_jacobi_preconditioner(
    A,
    *,
    dof_per_node: int | None = None,
    block_sizes: Sequence[int] | None = None,
):
    """
    Build block Jacobi preconditioner for blocked DOF layouts.

    Priority:
    block_sizes -> dof_per_node -> A.meta (block_sizes / dof_per_node) -> error

    Returns a function r -> z applying the inverse of A's block diagonal.
    """
    # Pull out COO-style (rows, cols, data) for the two supported inputs.
    if jsparse is not None and isinstance(A, jsparse.BCOO):
        n = int(A.shape[0])
        block_size = _extract_block_sizes(
            n, dof_per_node=dof_per_node, block_sizes=block_sizes, meta=None
        )
        row_idx = jnp.asarray(A.indices[:, 0])
        col_idx = jnp.asarray(A.indices[:, 1])
        values = jnp.asarray(A.data)
    elif isinstance(A, FluxSparseMatrix):
        n = int(A.n_dofs)
        block_size = _extract_block_sizes(
            n, dof_per_node=dof_per_node, block_sizes=block_sizes, meta=A.meta
        )
        row_idx = jnp.asarray(A.pattern.rows)
        col_idx = jnp.asarray(A.pattern.cols)
        values = jnp.asarray(A.data)
    else:
        raise ValueError("block_jacobi requires FluxSparseMatrix or BCOO")

    if n % block_size != 0:
        raise ValueError("block_size must divide n_dofs")
    n_block = n // block_size
    # Split each global index into (block index, local index within block).
    blk_of_row = row_idx // block_size
    blk_of_col = col_idx // block_size
    local_row = row_idx % block_size
    local_col = col_idx % block_size
    # Keep only entries inside a diagonal block.
    keep = blk_of_row == blk_of_col
    blk_of_row = blk_of_row[keep]
    local_row = local_row[keep]
    local_col = local_col[keep]
    values = values[keep]
    # Scatter-add into the stack of dense diagonal blocks.
    diag_blocks = jnp.zeros((n_block, block_size, block_size), dtype=values.dtype)
    diag_blocks = diag_blocks.at[blk_of_row, local_row, local_col].add(values)
    # Tiny ridge keeps the inverse finite for (near-)singular blocks.
    diag_blocks = diag_blocks + 1e-12 * jnp.eye(block_size)[None, :, :]
    inv_blocks = jnp.linalg.inv(diag_blocks)

    def precon(r):
        # Apply each inverse block to its slice of the residual.
        per_block = r.reshape((n_block, block_size))
        solved = jnp.einsum("bij,bj->bi", inv_blocks, per_block)
        return solved.reshape((-1,))

    return precon
fluxfem/solver/result.py CHANGED
@@ -21,6 +21,12 @@ class SolverResult:
21
21
  linear_iters: Optional[int] = None
22
22
  linear_converged: Optional[bool] = None
23
23
  linear_residual: Optional[float] = None
24
+ linear_solve_time: Optional[float] = None
25
+ pc_setup_time: Optional[float] = None
26
+ pmat_build_time: Optional[float] = None
27
+ pmat_rebuilds: Optional[int] = None
28
+ pmat_mode: Optional[str] = None
29
+ linear_fallbacks: Optional[List[str]] = None
24
30
 
25
31
  tol: Optional[float] = None
26
32
  atol: Optional[float] = None
@@ -55,6 +61,18 @@ class SolverResult:
55
61
  parts.append(f"lin_conv={self.linear_converged}")
56
62
  if self.linear_residual is not None:
57
63
  parts.append(f"lin_res={self.linear_residual:.3e}")
64
+ if self.linear_solve_time is not None:
65
+ parts.append(f"lin_solve_dt={self.linear_solve_time:.3e}s")
66
+ if self.pc_setup_time is not None:
67
+ parts.append(f"pc_setup_dt={self.pc_setup_time:.3e}s")
68
+ if self.pmat_build_time is not None:
69
+ parts.append(f"pmat_dt={self.pmat_build_time:.3e}s")
70
+ if self.pmat_rebuilds is not None:
71
+ parts.append(f"pmat_rebuilds={self.pmat_rebuilds}")
72
+ if self.pmat_mode is not None:
73
+ parts.append(f"pmat_mode={self.pmat_mode}")
74
+ if self.linear_fallbacks:
75
+ parts.append(f"lin_fallbacks={self.linear_fallbacks}")
58
76
  if self.stop_reason:
59
77
  parts.append(f"reason={self.stop_reason}")
60
78
  if self.nan_detected: