fluxfem 0.1.4-py3-none-any.whl → 0.2.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry, and is provided for informational purposes only.
Files changed (45)
  1. fluxfem/__init__.py +68 -0
  2. fluxfem/core/__init__.py +115 -10
  3. fluxfem/core/assembly.py +676 -91
  4. fluxfem/core/basis.py +73 -52
  5. fluxfem/core/dtypes.py +9 -1
  6. fluxfem/core/forms.py +10 -0
  7. fluxfem/core/mixed_assembly.py +263 -0
  8. fluxfem/core/mixed_space.py +348 -0
  9. fluxfem/core/mixed_weakform.py +97 -0
  10. fluxfem/core/solver.py +2 -0
  11. fluxfem/core/space.py +262 -17
  12. fluxfem/core/weakform.py +768 -7
  13. fluxfem/helpers_wf.py +49 -0
  14. fluxfem/mesh/__init__.py +54 -2
  15. fluxfem/mesh/base.py +316 -7
  16. fluxfem/mesh/contact.py +825 -0
  17. fluxfem/mesh/dtypes.py +12 -0
  18. fluxfem/mesh/hex.py +17 -16
  19. fluxfem/mesh/io.py +6 -4
  20. fluxfem/mesh/mortar.py +3907 -0
  21. fluxfem/mesh/supermesh.py +316 -0
  22. fluxfem/mesh/surface.py +22 -4
  23. fluxfem/mesh/tet.py +10 -4
  24. fluxfem/physics/diffusion.py +3 -0
  25. fluxfem/physics/elasticity/hyperelastic.py +3 -0
  26. fluxfem/physics/elasticity/linear.py +9 -2
  27. fluxfem/solver/__init__.py +42 -2
  28. fluxfem/solver/bc.py +38 -2
  29. fluxfem/solver/block_matrix.py +132 -0
  30. fluxfem/solver/block_system.py +454 -0
  31. fluxfem/solver/cg.py +115 -33
  32. fluxfem/solver/dirichlet.py +334 -4
  33. fluxfem/solver/newton.py +237 -60
  34. fluxfem/solver/petsc.py +439 -0
  35. fluxfem/solver/preconditioner.py +106 -0
  36. fluxfem/solver/result.py +18 -0
  37. fluxfem/solver/solve_runner.py +168 -1
  38. fluxfem/solver/solver.py +12 -1
  39. fluxfem/solver/sparse.py +124 -9
  40. fluxfem-0.2.0.dist-info/METADATA +303 -0
  41. fluxfem-0.2.0.dist-info/RECORD +59 -0
  42. fluxfem-0.1.4.dist-info/METADATA +0 -127
  43. fluxfem-0.1.4.dist-info/RECORD +0 -48
  44. {fluxfem-0.1.4.dist-info → fluxfem-0.2.0.dist-info}/LICENSE +0 -0
  45. {fluxfem-0.1.4.dist-info → fluxfem-0.2.0.dist-info}/WHEEL +0 -0
fluxfem/solver/block_matrix.py
@@ -0,0 +1,132 @@
+from __future__ import annotations
+
+from typing import Mapping, Sequence
+
+import numpy as np
+
+try:
+    import scipy.sparse as sp
+except Exception:  # pragma: no cover
+    sp = None
+
+from .block_system import split_block_matrix
+from .sparse import FluxSparseMatrix
+
+
+def diag(**blocks):
+    return dict(blocks)
+
+
+def _infer_sizes_from_diag(diag_blocks):
+    sizes = {}
+    for name, blk in diag_blocks.items():
+        if isinstance(blk, FluxSparseMatrix):
+            sizes[name] = int(blk.n_dofs)
+        elif sp is not None and sp.issparse(blk):
+            shape = blk.shape
+            if shape[0] != shape[1]:
+                raise ValueError(f"diag block {name} must be square, got {shape}")
+            sizes[name] = int(shape[0])
+        else:
+            arr = np.asarray(blk)
+            if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
+                raise ValueError(f"diag block {name} must be square, got {arr.shape}")
+            sizes[name] = int(arr.shape[0])
+    return sizes
+
+
+def _add_blocks(a, b):
+    if a is None:
+        return b
+    if b is None:
+        return a
+    if isinstance(a, FluxSparseMatrix):
+        a = a.to_csr()
+    if isinstance(b, FluxSparseMatrix):
+        b = b.to_csr()
+    if sp is not None and sp.issparse(a):
+        if sp.issparse(b):
+            return a + b
+        return a + sp.csr_matrix(np.asarray(b))
+    if sp is not None and sp.issparse(b):
+        return sp.csr_matrix(np.asarray(a)) + b
+    return np.asarray(a) + np.asarray(b)
+
+
+def _transpose_block(block, rule: str):
+    if isinstance(block, FluxSparseMatrix):
+        if sp is None:
+            raise ImportError("scipy is required to transpose FluxSparseMatrix blocks.")
+        block = block.to_csr()
+    if sp is not None and sp.issparse(block):
+        out = block.T
+    else:
+        out = np.asarray(block).T
+    if rule == "H":
+        return out.conjugate()
+    return out
+
+
+def make(
+    *,
+    diag: Mapping[str, object] | Sequence[object],
+    rel: Mapping[tuple[str, str], object] | None = None,
+    add_contiguous: object | None = None,
+    sizes: Mapping[str, int] | None = None,
+    symmetric: bool = False,
+    transpose_rule: str = "T",
+):
+    """
+    Build a blocks dict from diagonal blocks, optional relations, and a full matrix.
+    """
+    if isinstance(diag, Mapping):
+        diag_map = dict(diag)
+    else:
+        diag_seq = list(diag)
+        if sizes is None:
+            diag_map = dict(zip(range(len(diag_seq)), diag_seq))
+        else:
+            order = tuple(sizes.keys())
+            if len(diag_seq) != len(order):
+                raise ValueError("diag sequence length must match sizes")
+            diag_map = dict(zip(order, diag_seq))
+
+    if sizes is None:
+        sizes = _infer_sizes_from_diag(diag_map)
+    order = tuple(sizes.keys())
+
+    if add_contiguous is None:
+        blocks = {name: {} for name in order}
+    else:
+        blocks = split_block_matrix(add_contiguous, sizes=sizes)
+
+    for name, blk in diag_map.items():
+        if name not in sizes:
+            raise KeyError(f"Unknown field '{name}' in diag")
+        blocks.setdefault(name, {})
+        blocks[name][name] = _add_blocks(blocks[name].get(name), blk)
+
+    if transpose_rule not in {"T", "H", "none"}:
+        raise ValueError("transpose_rule must be one of: T, H, none")
+
+    if rel is not None:
+        for (name_i, name_j), blk in rel.items():
+            if name_i not in sizes or name_j not in sizes:
+                raise KeyError(f"Unknown field in rel: {(name_i, name_j)}")
+            blocks.setdefault(name_i, {})
+            blocks[name_i][name_j] = _add_blocks(blocks[name_i].get(name_j), blk)
+            if symmetric and name_i != name_j:
+                if transpose_rule == "none":
+                    blocks.setdefault(name_j, {})
+                    blocks[name_j][name_i] = _add_blocks(blocks[name_j].get(name_i), blk)
+                else:
+                    blocks.setdefault(name_j, {})
+                    blocks[name_j][name_i] = _add_blocks(
+                        blocks[name_j].get(name_i),
+                        _transpose_block(blk, transpose_rule),
+                    )
+
+    return blocks
+
+
+__all__ = ["diag", "make"]
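
For orientation, a minimal sketch of how these two helpers compose (not taken from the package docs; the field names "u"/"p" and the dense NumPy blocks are illustrative assumptions): diag(...) collects square diagonal blocks, make(...) infers the field sizes from them and, with symmetric=True, mirrors each off-diagonal rel entry across the diagonal using transpose_rule.

    import numpy as np
    from fluxfem.solver import block_matrix

    A = np.eye(4)        # hypothetical "u" diagonal block (4 DOFs)
    M = np.eye(2)        # hypothetical "p" diagonal block (2 DOFs)
    B = np.ones((4, 2))  # hypothetical u-p coupling block

    blocks = block_matrix.make(
        diag=block_matrix.diag(u=A, p=M),  # sizes inferred: u=4, p=2
        rel={("u", "p"): B},               # one off-diagonal relation
        symmetric=True,                    # mirror into ("p", "u") ...
        transpose_rule="T",                # ... as the transpose B.T
    )
    assert blocks["p"]["u"].shape == (2, 4)  # the mirrored transpose block
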
fluxfem/solver/block_system.py
@@ -0,0 +1,454 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Mapping, Sequence
+
+import numpy as np
+
+try:
+    import scipy.sparse as sp
+except Exception:  # pragma: no cover
+    sp = None
+
+from .dirichlet import DirichletBC, free_dofs
+from .sparse import FluxSparseMatrix
+
+
+@dataclass(frozen=True)
+class BlockSystem:
+    K: object
+    F: np.ndarray
+    free_dofs: np.ndarray
+    dirichlet: DirichletBC
+    field_order: tuple[str, ...]
+    field_slices: dict[str, slice]
+
+    def expand(self, u_free):
+        return self.dirichlet.expand_solution(u_free, free=self.free_dofs, n_total=self.F.shape[0])
+
+    def split(self, u_full: np.ndarray) -> dict[str, np.ndarray]:
+        return {name: np.asarray(u_full)[self.field_slices[name]] for name in self.field_order}
+
+    def join(self, fields: Mapping[str, np.ndarray]) -> np.ndarray:
+        parts = []
+        for name in self.field_order:
+            if name not in fields:
+                raise KeyError(f"Missing field '{name}' in join.")
+            parts.append(np.asarray(fields[name]))
+        return np.concatenate(parts, axis=0)
+
+
+def _build_field_slices(order, sizes):
+    offsets = {}
+    slices = {}
+    offset = 0
+    for name in order:
+        size = int(sizes[name])
+        offsets[name] = offset
+        slices[name] = slice(offset, offset + size)
+        offset += size
+    return offsets, slices, offset
+
+
+def split_block_matrix(matrix, *, sizes: Mapping[str, int], order: Sequence[str] | None = None):
+    """
+    Split a block matrix into a dict-of-dicts by field order and sizes.
+    """
+    field_order = tuple(order) if order is not None else tuple(sizes.keys())
+    for name in field_order:
+        if name not in sizes:
+            raise KeyError(f"Missing size for field '{name}'")
+    offsets, _, n_total = _build_field_slices(field_order, sizes)
+
+    if isinstance(matrix, FluxSparseMatrix):
+        if sp is None:
+            raise ImportError("scipy is required to split FluxSparseMatrix blocks.")
+        mat = matrix.to_csr()
+    elif sp is not None and sp.issparse(matrix):
+        mat = matrix.tocsr()
+    else:
+        mat = np.asarray(matrix)
+
+    if mat.shape != (n_total, n_total):
+        raise ValueError(f"matrix has shape {mat.shape}, expected {(n_total, n_total)}")
+
+    blocks: dict[str, dict[str, object]] = {}
+    for name_i in field_order:
+        row = {}
+        i0 = offsets[name_i]
+        i1 = i0 + int(sizes[name_i])
+        for name_j in field_order:
+            j0 = offsets[name_j]
+            j1 = j0 + int(sizes[name_j])
+            row[name_j] = mat[i0:i1, j0:j1]
+        blocks[name_i] = row
+    return blocks
+
+
+def _infer_format(blocks, fmt):
+    if fmt != "auto":
+        return fmt
+    for row in blocks.values():
+        for blk in row.values():
+            if isinstance(blk, FluxSparseMatrix):
+                return "flux"
+            if sp is not None and sp.issparse(blk):
+                return "csr"
+    return "dense"
+
+
+def _infer_sizes_from_diag_seq(diag_seq):
+    sizes = {}
+    for idx, blk in enumerate(diag_seq):
+        if isinstance(blk, FluxSparseMatrix):
+            sizes[idx] = int(blk.n_dofs)
+        elif sp is not None and sp.issparse(blk):
+            shape = blk.shape
+            if shape[0] != shape[1]:
+                raise ValueError(f"diag block {idx} must be square, got {shape}")
+            sizes[idx] = int(shape[0])
+        else:
+            arr = np.asarray(blk)
+            if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
+                raise ValueError(f"diag block {idx} must be square, got {arr.shape}")
+            sizes[idx] = int(arr.shape[0])
+    return sizes
+
+
+def _coerce_rhs(rhs, order, sizes):
+    if rhs is None:
+        return np.zeros(sum(int(sizes[n]) for n in order), dtype=float)
+    if isinstance(rhs, Mapping):
+        parts = [np.asarray(rhs.get(name, np.zeros(int(sizes[name]), dtype=float))) for name in order]
+        for name, part in zip(order, parts):
+            if part.shape != (int(sizes[name]),):
+                raise ValueError(f"rhs[{name}] has shape {part.shape}, expected {(int(sizes[name]),)}")
+        return np.concatenate(parts, axis=0)
+    if hasattr(rhs, "shape") and not isinstance(rhs, (list, tuple)):
+        rhs_arr = np.asarray(rhs)
+        if rhs_arr.shape != (sum(int(sizes[n]) for n in order),):
+            raise ValueError("rhs vector has unexpected shape")
+        return rhs_arr
+    parts = list(rhs)
+    if len(parts) != len(order):
+        raise ValueError("rhs sequence length must match number of fields")
+    parts = [np.asarray(p) for p in parts]
+    for name, part in zip(order, parts):
+        if part.shape != (int(sizes[name]),):
+            raise ValueError(f"rhs for {name} has shape {part.shape}, expected {(int(sizes[name]),)}")
+    return np.concatenate(parts, axis=0)
+
+
+def _build_dirichlet_from_fields(fields, offsets, *, merge: str):
+    if merge not in {"check_equal", "error", "first", "last"}:
+        raise ValueError("merge must be one of: check_equal, error, first, last")
+    dof_map: dict[int, float] = {}
+    for name, spec in fields.items():
+        if name not in offsets:
+            raise KeyError(f"Unknown field '{name}' in constraints")
+        offset = int(offsets[name])
+        if isinstance(spec, DirichletBC):
+            dofs = spec.dofs
+            vals = spec.vals
+        elif isinstance(spec, tuple) and len(spec) == 2:
+            dofs, vals = spec
+        else:
+            dofs, vals = spec, None
+        bc = DirichletBC(dofs, vals)
+        g_dofs = np.asarray(bc.dofs, dtype=int) + offset
+        g_vals = np.asarray(bc.vals, dtype=float)
+        for d, v in zip(g_dofs, g_vals):
+            if d in dof_map:
+                if merge == "error":
+                    raise ValueError(f"Duplicate Dirichlet DOF {d} in constraints")
+                if merge == "check_equal":
+                    if not np.isclose(dof_map[d], v):
+                        raise ValueError(f"Conflicting Dirichlet value for DOF {d}")
+                if merge == "first":
+                    continue
+            dof_map[d] = float(v)
+    if not dof_map:
+        return DirichletBC(np.array([], dtype=int), np.array([], dtype=float))
+    dofs_sorted = np.array(sorted(dof_map.keys()), dtype=int)
+    vals_sorted = np.array([dof_map[d] for d in dofs_sorted], dtype=float)
+    return DirichletBC(dofs_sorted, vals_sorted)
+
+
+def _build_dirichlet_from_sequence(seq, order, offsets, *, merge: str):
+    if merge not in {"check_equal", "error", "first", "last"}:
+        raise ValueError("merge must be one of: check_equal, error, first, last")
+    if len(seq) != len(order):
+        raise ValueError("constraints sequence length must match order")
+    dof_map: dict[int, float] = {}
+    for name, spec in zip(order, seq):
+        if spec is None:
+            continue
+        offset = int(offsets[name])
+        if isinstance(spec, DirichletBC):
+            dofs = spec.dofs
+            vals = spec.vals
+        elif isinstance(spec, tuple) and len(spec) == 2:
+            dofs, vals = spec
+        else:
+            dofs, vals = spec, None
+        bc = DirichletBC(dofs, vals)
+        g_dofs = np.asarray(bc.dofs, dtype=int) + offset
+        g_vals = np.asarray(bc.vals, dtype=float)
+        for d, v in zip(g_dofs, g_vals):
+            if d in dof_map:
+                if merge == "error":
+                    raise ValueError(f"Duplicate Dirichlet DOF {d} in constraints")
+                if merge == "check_equal":
+                    if not np.isclose(dof_map[d], v):
+                        raise ValueError(f"Conflicting Dirichlet value for DOF {d}")
+                if merge == "first":
+                    continue
+            dof_map[d] = float(v)
+    if not dof_map:
+        return DirichletBC(np.array([], dtype=int), np.array([], dtype=float))
+    dofs_sorted = np.array(sorted(dof_map.keys()), dtype=int)
+    vals_sorted = np.array([dof_map[d] for d in dofs_sorted], dtype=float)
+    return DirichletBC(dofs_sorted, vals_sorted)
+
+
+def _transpose_block(block, rule: str):
+    if isinstance(block, FluxSparseMatrix):
+        if sp is None:
+            raise ImportError("scipy is required to transpose FluxSparseMatrix blocks.")
+        block = block.to_csr()
+    if sp is not None and sp.issparse(block):
+        out = block.T
+    else:
+        out = np.asarray(block).T
+    if rule == "H":
+        return out.conjugate()
+    return out
+
+
+def _add_blocks(a, b):
+    if a is None:
+        return b
+    if b is None:
+        return a
+    if isinstance(a, FluxSparseMatrix):
+        a = a.to_csr()
+    if isinstance(b, FluxSparseMatrix):
+        b = b.to_csr()
+    if sp is not None and sp.issparse(a):
+        if sp.issparse(b):
+            return a + b
+        return a + sp.csr_matrix(np.asarray(b))
+    if sp is not None and sp.issparse(b):
+        return sp.csr_matrix(np.asarray(a)) + b
+    return np.asarray(a) + np.asarray(b)
+
+
+def _blocks_from_diag_rel(
+    *,
+    diag: Mapping[str, object] | Sequence[object],
+    sizes: Mapping[str, int],
+    order: Sequence[str],
+    rel: Mapping[tuple[str, str], object] | None = None,
+    add_contiguous: object | None = None,
+    symmetric: bool = False,
+    transpose_rule: str = "T",
+) -> Mapping[str, Mapping[str, object]]:
+    if isinstance(diag, Mapping):
+        diag_map = dict(diag)
+    else:
+        diag_seq = list(diag)
+        if len(diag_seq) != len(order):
+            raise ValueError("diag sequence length must match order")
+        diag_map = dict(zip(order, diag_seq))
+
+    if add_contiguous is None:
+        blocks = {name: {} for name in order}
+    else:
+        blocks = split_block_matrix(add_contiguous, sizes=sizes, order=order)
+
+    if transpose_rule not in {"T", "H", "none"}:
+        raise ValueError("transpose_rule must be one of: T, H, none")
+
+    for name, blk in diag_map.items():
+        if name not in sizes:
+            raise KeyError(f"Unknown field '{name}' in diag")
+        blocks.setdefault(name, {})
+        blocks[name][name] = _add_blocks(blocks[name].get(name), blk)
+
+    if rel is not None:
+        for (name_i, name_j), blk in rel.items():
+            if name_i not in sizes or name_j not in sizes:
+                raise KeyError(f"Unknown field in rel: {(name_i, name_j)}")
+            blocks.setdefault(name_i, {})
+            blocks[name_i][name_j] = _add_blocks(blocks[name_i].get(name_j), blk)
+            if symmetric and name_i != name_j:
+                blocks.setdefault(name_j, {})
+                if transpose_rule == "none":
+                    blocks[name_j][name_i] = _add_blocks(blocks[name_j].get(name_i), blk)
+                else:
+                    blocks[name_j][name_i] = _add_blocks(
+                        blocks[name_j].get(name_i),
+                        _transpose_block(blk, transpose_rule),
+                    )
+
+    return blocks
+
+
+def build_block_system(
+    *,
+    diag: Mapping[str, object] | Sequence[object],
+    sizes: Mapping[str, int] | None = None,
+    rel: Mapping[tuple[str, str], object] | None = None,
+    add_contiguous: object | None = None,
+    rhs: Mapping[str, object] | Sequence[object] | np.ndarray | None = None,
+    constraints=None,
+    merge: str = "check_equal",
+    format: str = "auto",
+    symmetric: bool = False,
+    transpose_rule: str = "T",
+) -> BlockSystem:
+    """
+    Build a block system from diagonal blocks, optional relations, and constraints.
+
+    format:
+      - "auto": FluxSparseMatrix if any block is FluxSparseMatrix, CSR if any block is sparse, else dense
+      - "flux": return FluxSparseMatrix
+      - "csr": return scipy.sparse CSR
+      - "dense": return numpy ndarray
+    """
+    if sizes is None:
+        if isinstance(diag, Mapping):
+            sizes = _infer_sizes_from_diag(diag)
+            field_order = tuple(sizes.keys())
+        else:
+            sizes = _infer_sizes_from_diag_seq(diag)
+            field_order = tuple(range(len(diag)))
+    else:
+        field_order = tuple(sizes.keys())
+    offsets, field_slices, n_total = _build_field_slices(field_order, sizes)
+    prefer_flux = False
+    if format == "auto":
+        if isinstance(add_contiguous, FluxSparseMatrix):
+            prefer_flux = True
+        if isinstance(diag, Mapping):
+            prefer_flux = any(isinstance(blk, FluxSparseMatrix) for blk in diag.values())
+        else:
+            prefer_flux = any(isinstance(blk, FluxSparseMatrix) for blk in diag)
+    blocks = _blocks_from_diag_rel(
+        diag=diag,
+        rel=rel,
+        add_contiguous=add_contiguous,
+        sizes=sizes,
+        order=field_order,
+        symmetric=symmetric,
+        transpose_rule=transpose_rule,
+    )
+    use_format = "flux" if prefer_flux else _infer_format(blocks, format)
+
+    def _block_shape(name_i, name_j):
+        return (int(sizes[name_i]), int(sizes[name_j]))
+
+    if use_format == "flux":
+        rows_list = []
+        cols_list = []
+        data_list = []
+        for name_i in field_order:
+            row_blocks = blocks.get(name_i, {})
+            for name_j in field_order:
+                blk = row_blocks.get(name_j)
+                if blk is None:
+                    continue
+                shape = _block_shape(name_i, name_j)
+                if isinstance(blk, FluxSparseMatrix):
+                    if shape[0] != shape[1] or int(blk.n_dofs) != shape[0]:
+                        raise ValueError(f"Block {name_i},{name_j} has incompatible FluxSparseMatrix size")
+                    r = np.asarray(blk.pattern.rows, dtype=np.int64)
+                    c = np.asarray(blk.pattern.cols, dtype=np.int64)
+                    d = np.asarray(blk.data)
+                elif sp is not None and sp.issparse(blk):
+                    coo = blk.tocoo()
+                    r = np.asarray(coo.row, dtype=np.int64)
+                    c = np.asarray(coo.col, dtype=np.int64)
+                    d = np.asarray(coo.data)
+                    if coo.shape != shape:
+                        raise ValueError(f"Block {name_i},{name_j} has shape {coo.shape}, expected {shape}")
+                else:
+                    arr = np.asarray(blk)
+                    if arr.shape != shape:
+                        raise ValueError(f"Block {name_i},{name_j} has shape {arr.shape}, expected {shape}")
+                    r, c = np.nonzero(arr)
+                    d = arr[r, c]
+                if r.size:
+                    rows_list.append(r + offsets[name_i])
+                    cols_list.append(c + offsets[name_j])
+                    data_list.append(d)
+        rows = np.concatenate(rows_list) if rows_list else np.asarray([], dtype=np.int32)
+        cols = np.concatenate(cols_list) if cols_list else np.asarray([], dtype=np.int32)
+        data = np.concatenate(data_list) if data_list else np.asarray([], dtype=float)
+        K = FluxSparseMatrix(rows, cols, data, n_total)
+    else:
+        if use_format == "csr" and sp is None:
+            raise ImportError("scipy is required for CSR block systems.")
+        block_rows = []
+        for name_i in field_order:
+            row = []
+            row_blocks = blocks.get(name_i, {})
+            for name_j in field_order:
+                blk = row_blocks.get(name_j)
+                shape = _block_shape(name_i, name_j)
+                if blk is None:
+                    if use_format == "csr":
+                        row.append(sp.csr_matrix(shape))
+                    else:
+                        row.append(np.zeros(shape, dtype=float))
+                    continue
+                if isinstance(blk, FluxSparseMatrix):
+                    if sp is None:
+                        raise ImportError("scipy is required to assemble sparse block systems.")
+                    blk = blk.to_csr()
+                if sp is not None and sp.issparse(blk):
+                    blk = blk.tocsr()
+                    if blk.shape != shape:
+                        raise ValueError(f"Block {name_i},{name_j} has shape {blk.shape}, expected {shape}")
+                    row.append(blk)
+                else:
+                    arr = np.asarray(blk)
+                    if arr.shape != shape:
+                        raise ValueError(f"Block {name_i},{name_j} has shape {arr.shape}, expected {shape}")
+                    if use_format == "csr":
+                        row.append(sp.csr_matrix(arr))
+                    else:
+                        row.append(arr)
+            block_rows.append(row)
+        if use_format == "csr":
+            K = sp.bmat(block_rows, format="csr")
+        else:
+            K = np.block(block_rows)
+
+    F = _coerce_rhs(rhs, field_order, sizes)
+
+    if constraints is None:
+        bc = DirichletBC(np.array([], dtype=int), np.array([], dtype=float))
+        free = free_dofs(n_total, bc.dofs)
+        return BlockSystem(K=K, F=F, free_dofs=free, dirichlet=bc, field_order=field_order, field_slices=field_slices)
+
+    if isinstance(constraints, DirichletBC):
+        bc = constraints
+    elif isinstance(constraints, tuple) and len(constraints) == 2:
+        bc = DirichletBC(constraints[0], constraints[1])
+    elif isinstance(constraints, Mapping):
+        bc = _build_dirichlet_from_fields(constraints, offsets, merge=merge)
+    elif isinstance(constraints, Sequence) and not isinstance(constraints, (str, bytes)):
+        bc = _build_dirichlet_from_sequence(constraints, field_order, offsets, merge=merge)
+    else:
+        raise ValueError("constraints must be DirichletBC, (dofs, vals), or mapping")
+
+    system = bc.condense_system(K, F)
+    return BlockSystem(
+        K=system.K,
+        F=np.asarray(system.F),
+        free_dofs=system.free_dofs,
+        dirichlet=bc,
+        field_order=field_order,
+        field_slices=field_slices,
+    )
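
And a companion sketch for build_block_system (again illustrative, not from the package docs: the matrices, field names, and right-hand side are made up; the dense, unconstrained path is shown, where K comes out as a plain np.block result):

    import numpy as np
    from fluxfem.solver.block_system import build_block_system

    A = 2.0 * np.eye(3)   # hypothetical "u" diagonal block (3 DOFs)
    M = 4.0 * np.eye(2)   # hypothetical "p" diagonal block (2 DOFs)
    B = np.zeros((3, 2))  # hypothetical u-p coupling block
    B[0, 0] = 1.0

    system = build_block_system(
        diag={"u": A, "p": M},
        rel={("u", "p"): B},
        symmetric=True,                           # mirrors B.T into ("p", "u")
        rhs={"u": np.ones(3), "p": np.zeros(2)},  # per-field RHS, concatenated to F
    )
    u_full = np.linalg.solve(system.K, system.F)  # dense 5x5 system, no constraints
    fields = system.split(u_full)                 # {"u": shape (3,), "p": shape (2,)}
    assert np.allclose(system.join(fields), u_full)  # join inverts split

Per the code above, constraints also accepts a DirichletBC, a (dofs, vals) tuple, a per-field mapping, or a per-field sequence; in those cases the returned system is already condensed to the free DOFs and expand re-inserts the prescribed values.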