redbirdpy-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redbirdpy/__init__.py +112 -0
- redbirdpy/analytical.py +927 -0
- redbirdpy/forward.py +589 -0
- redbirdpy/property.py +602 -0
- redbirdpy/recon.py +893 -0
- redbirdpy/solver.py +814 -0
- redbirdpy/utility.py +1117 -0
- redbirdpy-0.1.0.dist-info/METADATA +596 -0
- redbirdpy-0.1.0.dist-info/RECORD +13 -0
- redbirdpy-0.1.0.dist-info/WHEEL +5 -0
- redbirdpy-0.1.0.dist-info/licenses/LICENSE.txt +674 -0
- redbirdpy-0.1.0.dist-info/top_level.txt +1 -0
- redbirdpy-0.1.0.dist-info/zip-safe +1 -0
redbirdpy/utility.py
ADDED
@@ -0,0 +1,1117 @@

"""
Redbird Utility Module - Mesh and data utilities for DOT/NIRS.

INDEX CONVENTION: All mesh indices (elem, face) are 1-based to match
MATLAB/iso2mesh. Conversion to 0-based occurs only when indexing numpy arrays.

This module provides utility functions for mesh preparation, source/detector
handling, data manipulation, and visualization support.
"""

__all__ = [
    "meshprep",
    "deldotdel",
    "sdmap",
    "getoptodes",
    "getdistance",
    "getltr",
    "getreff",
    "elem2node",
    "addnoise",
    "meshinterp",
    "src2bc",
    "HAS_ISO2MESH",
    "forcearray",
]

import numpy as np
from scipy import sparse
from typing import Dict, Tuple, Optional, Union, List, Any
import warnings


# Use iso2mesh for mesh operations (maintains 1-based convention)
try:
    import iso2mesh as i2m

    HAS_ISO2MESH = True
except ImportError:
    HAS_ISO2MESH = False
    warnings.warn(
        "iso2mesh not found. Some mesh functions will use fallback implementations."
    )

# Try to import matplotlib Path for fast inpolygon
try:
    from matplotlib.path import Path as MplPath

    HAS_MPL_PATH = True
except ImportError:
    HAS_MPL_PATH = False


def deldotdel(cfg: dict) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute del(phi_i) dot del(phi_j) for FEM assembly.

    PRESERVES ORIGINAL ALGORITHM - only optimizes array operations.
    """
    node = cfg["node"]
    evol = cfg["evol"]

    # Convert 1-based elem to 0-based for numpy indexing
    elem_0 = cfg["elem"][:, :4].astype(np.int32) - 1
    ne = elem_0.shape[0]

    # Reshape nodes for vectorized computation: (Ne, 4, 3) -> (3, 4, Ne)
    no = node[elem_0, :].transpose(2, 1, 0)  # Shape: (3, 4, Ne)

    delphi = np.zeros((3, 4, ne))

    # Column indices for cross-product computation (original algorithm)
    col = np.array([[3, 1, 2, 1], [2, 0, 3, 2], [1, 3, 0, 3], [0, 2, 1, 0]])

    # evol needs to be shape (ne,) for broadcasting
    evol_inv = 1.0 / (evol * 6.0)  # Shape: (ne,)

    # Original algorithm preserved exactly
    for coord in range(3):
        idx = [c for c in range(3) if c != coord]
        for i in range(4):
            # Each term is shape (ne,)
            term1 = no[idx[0], col[i, 0], :] - no[idx[0], col[i, 1], :]
            term2 = no[idx[1], col[i, 2], :] - no[idx[1], col[i, 3], :]
            term3 = no[idx[0], col[i, 2], :] - no[idx[0], col[i, 3], :]
            term4 = no[idx[1], col[i, 0], :] - no[idx[1], col[i, 1], :]

            delphi[coord, i, :] = (term1 * term2 - term3 * term4) * evol_inv

    result = np.zeros((ne, 10))
    count = 0
    for i in range(4):
        for j in range(i, 4):
            result[:, count] = np.sum(delphi[:, i, :] * delphi[:, j, :], axis=0)
            count += 1

    result *= evol[:, np.newaxis]

    return result, delphi

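A minimal sketch of deldotdel() on a single unit tetrahedron follows (illustrative only, not part of utility.py). The node/elem/evol values are assumptions chosen so the 1-based elem convention and the (Ne, 10) pairwise output are easy to inspect.

import numpy as np
from redbirdpy.utility import deldotdel

# One unit tetrahedron; elem holds 1-based node indices, evol is its volume (1/6)
cfg = {
    "node": np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
    "elem": np.array([[1, 2, 3, 4]]),
    "evol": np.array([1.0 / 6.0]),
}
dd, delphi = deldotdel(cfg)
print(dd.shape)      # (1, 10): one row per element, upper-triangular (i, j) pairs with i <= j
print(delphi.shape)  # (3, 4, 1): gradient of each of the 4 linear basis functions
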
def meshprep(cfg: dict) -> Tuple[dict, np.ndarray]:
    """
    Prepare mesh structure with all derived quantities.

    All mesh indices (elem, face) remain 1-based in the returned cfg.
    """
    from .property import updateprop, getbulk

    # Convert list inputs to numpy arrays
    cfg = forcearray(
        cfg,
        [
            "node",
            "elem",
            "face",
            "srcpos",
            "srcdir",
            "detpos",
            "detdir",
            "prop",
            "seg",
            "widesrc",
            "widedet",
        ],
    )

    if "node" not in cfg or "elem" not in cfg:
        raise ValueError("cfg must contain 'node' and 'elem'")

    node = cfg["node"]
    elem = cfg["elem"]  # 1-based indices

    # Extract segmentation if present in elem column 5
    if elem.shape[1] > 4 and ("seg" not in cfg or cfg["seg"] is None):
        cfg["seg"] = elem[:, 4].astype(int)

    # Reorient elements to have positive volume (outward normals)
    if not cfg.get("isreoriented", False):
        if HAS_ISO2MESH:
            # iso2mesh.meshreorient handles 1-based indices and returns volumes
            elem_reoriented, evol, _ = i2m.meshreorient(node, elem[:, :4])
            cfg["evol"] = evol
        else:
            elem_reoriented = _meshreorient_fallback(node, elem[:, :4])

        if elem.shape[1] > 4:
            cfg["elem"] = np.column_stack([elem_reoriented, elem[:, 4:]])
        else:
            cfg["elem"] = elem_reoriented
        cfg["isreoriented"] = True

    # Compute surface faces (1-based indices)
    if "face" not in cfg or cfg["face"] is None:
        if HAS_ISO2MESH:
            face_result = i2m.volface(cfg["elem"][:, :4])
            # volface may return (face, faceid) tuple or just face
            if isinstance(face_result, tuple):
                cfg["face"] = face_result[0]
            else:
                cfg["face"] = face_result
        else:
            cfg["face"] = _volface_fallback(cfg["elem"][:, :4])

    # Compute face areas
    if "area" not in cfg or cfg["area"] is None:
        if HAS_ISO2MESH:
            cfg["area"] = i2m.elemvolume(node, cfg["face"])
        else:
            cfg["area"] = _elemvolume_fallback(node, cfg["face"])

    # Compute element volumes
    if "evol" not in cfg or cfg["evol"] is None:
        if HAS_ISO2MESH:
            cfg["evol"] = i2m.elemvolume(node, cfg["elem"][:, :4])
        else:
            cfg["evol"] = _elemvolume_fallback(node, cfg["elem"][:, :4])

    # Check for degenerate elements
    if np.any(cfg["evol"] == 0):
        bad_elem = np.where(cfg["evol"] == 0)[0]
        raise ValueError(f"Degenerate elements detected at indices: {bad_elem}")

    # Compute nodal volumes
    if "nvol" not in cfg or cfg["nvol"] is None:
        cfg["nvol"] = _nodevolume(node, cfg["elem"][:, :4], cfg["evol"])

    # Validate sources and detectors
    if "srcpos" not in cfg:
        raise ValueError("cfg.srcpos is required")
    if "srcdir" not in cfg:
        raise ValueError("cfg.srcdir is required")

    # Update properties if multi-spectral
    if isinstance(cfg.get("prop"), dict) and "param" in cfg:
        cfg["prop"] = updateprop(cfg)

    # Compute effective reflection coefficient
    if "reff" not in cfg or cfg["reff"] is None:
        bkprop = getbulk(cfg)
        if isinstance(bkprop, dict):
            cfg["reff"] = {}
            cfg["musp0"] = {}
            for wv, prop in bkprop.items():
                cfg["reff"][wv] = getreff(prop[3], 1.0)
                cfg["musp0"][wv] = prop[1] * (1 - prop[2])
        else:
            cfg["reff"] = getreff(bkprop[3], 1.0)
            cfg["musp0"] = bkprop[1] * (1 - bkprop[2])

    # Process wide-field sources if present
    srctype = cfg.get("srctype", "pencil")
    if (
        srctype not in ["pencil", "isotropic"] or "widesrcid" in cfg
    ) and "widesrc" not in cfg:
        cfg["srcpos0"] = cfg["srcpos"].copy()
        cfg = src2bc(cfg, isdet=False)

    # Process wide-field detectors if present
    dettype = cfg.get("dettype", "pencil")
    if (
        dettype not in ["pencil", "isotropic"] or "widedetid" in cfg
    ) and "widedet" not in cfg:
        cfg["detpos0"] = cfg["detpos"].copy()
        cfg = src2bc(cfg, isdet=True)

    # Compute sparse matrix structure
    if "cols" not in cfg or cfg["cols"] is None:
        cfg["rows"], cfg["cols"], cfg["idxcount"] = _femnz(
            cfg["elem"][:, :4], node.shape[0]
        )

    if "idxsum" not in cfg or cfg["idxsum"] is None:
        cfg["idxsum"] = np.cumsum(cfg["idxcount"])

    # Compute gradient operator
    if "deldotdel" not in cfg or cfg["deldotdel"] is None:
        cfg["deldotdel"], _ = deldotdel(cfg)

    # Set default modulation frequency
    if "omega" not in cfg:
        cfg["omega"] = 0

    # Create source-detector mapping
    sd = sdmap(cfg)

    return cfg, sd

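A minimal meshprep() sketch (illustrative, not from utility.py). It uses a single-tetrahedron mesh to stay self-contained; the cfg["prop"] row layout [mua, mus, g, n] is an assumption consistent with how getreff()/getltr() index the bulk properties, and redbirdpy.property.getbulk() may require additional fields in practice.

import numpy as np
from redbirdpy.utility import meshprep

cfg = {
    "node": np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]),
    "elem": np.array([[1, 2, 3, 4]]),       # 1-based node indices
    "srcpos": np.array([[2.0, 2.0, 0.0]]),
    "srcdir": np.array([[0.0, 0.0, 1.0]]),
    "detpos": np.array([[4.0, 2.0, 0.0]]),
    "detdir": np.array([[0.0, 0.0, 1.0]]),
    # rows assumed to follow [mua, mus, g, n]; row 0 = background, row 1 = tissue
    "prop": np.array([[0.0, 0.0, 1.0, 1.0],
                      [0.008, 1.0, 0.01, 1.37]]),
}

cfg, sd = meshprep(cfg)   # fills face, area, evol, nvol, reff, deldotdel, ... and returns the SD map
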
# ============== Wide-field Source/Detector Functions ==============


def src2bc(cfg: dict, isdet: bool = False) -> dict:
    """
    Convert wide-field source/detector forms into boundary conditions.

    This function computes the inward flux on mesh surface triangles for
    wide-field illumination patterns (planar, pattern, fourier sources).

    Parameters
    ----------
    cfg : dict
        Simulation configuration containing mesh and source/detector info
    isdet : bool
        If False (default), process sources. If True, process detectors.

    Returns
    -------
    cfg : dict
        Updated configuration with widesrc/widedet fields added
    """
    if not HAS_ISO2MESH:
        raise ImportError("iso2mesh is required for wide-field source processing")

    # Determine field names based on source vs detector
    if not isdet:
        type_key, pos_key, dir_key = "srctype", "srcpos", "srcdir"
        param1_key, param2_key = "srcparam1", "srcparam2"
        pattern_key, weight_key = "srcpattern", "srcweight"
        id_key, wideid_key = "srcid", "widesrcid"
        out_key, mapping_key = "widesrc", "wfsrcmapping"
    else:
        type_key, pos_key, dir_key = "dettype", "detpos", "detdir"
        param1_key, param2_key = "detparam1", "detparam2"
        pattern_key, weight_key = "detpattern", "detweight"
        id_key, wideid_key = "detid", "widedetid"
        out_key, mapping_key = "widedet", "wfdetmapping"

    # Check if wide-field processing is needed
    srctype = cfg.get(type_key, "pencil")
    if srctype in ["pencil", "isotropic"] and wideid_key not in cfg:
        return cfg

    srcdir = np.atleast_2d(cfg[dir_key])
    sources = np.atleast_2d(cfg[pos_key])

    # Build wide-field source parameter structure
    if wideid_key in cfg:
        widesrcid = cfg[wideid_key]
        if not isinstance(widesrcid, dict):
            widesrcid = {"": widesrcid}
    else:
        tempwf = {
            "srctype": [srctype],
            "srcid": [cfg.get(id_key, 0)],
            "srcparam1": [np.atleast_1d(cfg[param1_key])],
            "srcparam2": [np.atleast_1d(cfg[param2_key])],
        }
        if pattern_key in cfg:
            tempwf["srcpattern"] = [cfg[pattern_key]]
        if weight_key in cfg:
            tempwf["srcweight"] = [cfg[weight_key]]
        widesrcid = {"": tempwf}

    # Handle optical properties
    if isinstance(cfg.get("prop"), dict):
        prop = cfg["prop"]
    else:
        prop = {"": cfg["prop"]}

    # Ensure widesrcid keys match prop keys
    if set(widesrcid.keys()) != set(prop.keys()):
        if "" in widesrcid:
            temp = widesrcid[""]
            widesrcid = {wv: temp for wv in prop.keys()}

    widesrc_list = []
    wavelengths = list(prop.keys())
    wfsrcmapping = {}
    all_wide_ids = []

    for wv in wavelengths:
        wideparam = widesrcid[wv]
        op = prop[wv]

        # Compute 1/mu_s' for sinking collimated sources
        if op.ndim == 1:
            z0 = 1.0 / (op[0] + op[1] * (1 - op[2]))
        else:
            z0 = 1.0 / (op[1, 0] + op[1, 1] * (1 - op[1, 2]))

        srcmapping = []

        for wideidx in range(len(wideparam["srcid"])):
            srcid = wideparam["srcid"][wideidx]
            all_wide_ids.append(srcid)
            srctype_i = wideparam["srctype"][wideidx]
            srcparam1 = wideparam["srcparam1"][wideidx]
            srcparam2 = wideparam["srcparam2"][wideidx]

            srcpattern = None
            if srctype_i == "pattern" and "srcpattern" in wideparam:
                srcpattern = wideparam["srcpattern"][wideidx]

            srcweight = None
            if "srcweight" in wideparam:
                srcweight = wideparam["srcweight"][wideidx]

            srcpos = sources[srcid, :3]
            srcdir_i = srcdir[0, :3] if srcdir.shape[0] == 1 else srcdir[srcid, :3]

            # Process based on source type
            if srctype_i in ["planar", "pattern", "fourier"]:
                (
                    srcbc,
                    patsize,
                    pface,
                    parea,
                    pnode,
                    nodeid,
                    used_sinkplane,
                ) = _process_planar_source(
                    cfg,
                    srcpos,
                    srcdir_i,
                    srcparam1,
                    srcparam2,
                    srctype_i,
                    srcpattern,
                    z0,
                )
            else:
                raise ValueError(f"Source type '{srctype_i}' is not supported")

            # Apply boundary condition weighting
            reff = cfg["reff"]
            Reff = reff[wv] if isinstance(reff, dict) else reff

            rhs = _apply_bc_weighting(
                cfg, srcbc, Reff, srcweight, pface, parea, pnode, nodeid, used_sinkplane
            )

            # Record mapping
            indices = [len(widesrc_list), len(widesrc_list) + patsize - 1]
            widesrc_list.append(rhs)
            srcmapping.append([srcid, indices[0], indices[1]])

        wfsrcmapping[wv] = np.array(srcmapping) if srcmapping else np.array([])

    # Stack all wide-field sources
    if widesrc_list:
        widesrc = np.vstack(widesrc_list)
    else:
        widesrc = np.array([])

    # Remove wide-field source positions from point sources
    unique_wide_ids = list(set(all_wide_ids))
    mask = np.ones(sources.shape[0], dtype=bool)
    mask[unique_wide_ids] = False
    sources = sources[mask]

    # Simplify mapping if single wavelength
    if len(wfsrcmapping) == 1:
        wfsrcmapping = wfsrcmapping[wavelengths[0]]

    # Update cfg - transpose to (Nn x Npattern)
    cfg[out_key] = widesrc.T if widesrc.size > 0 else np.array([])
    cfg[mapping_key] = wfsrcmapping
    cfg[pos_key] = sources

    return cfg

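As a configuration sketch (illustrative, not from utility.py), a planar wide-field source is requested by setting the keys consumed above; meshprep() then routes the cfg through src2bc() automatically. iso2mesh is required on this path, and the specific values below are assumptions.

import numpy as np

# Added to a cfg dict before calling meshprep(cfg):
cfg_widefield = {
    "srctype": "planar",                          # or "pattern" / "fourier"
    "srcpos": np.array([[10.0, 10.0, 0.0]]),      # one corner of the illuminated rectangle
    "srcdir": np.array([[0.0, 0.0, 1.0]]),        # launch direction (collimated by default)
    "srcparam1": np.array([40.0, 0.0, 0.0, 0.0]),  # first edge vector; 4th entry unused for planar
    "srcparam2": np.array([0.0, 40.0, 0.0, 0.0]),  # second edge vector
}
# For "pattern", also supply cfg["srcpattern"] as a 2-D (or HxWxN) intensity array.
# For "fourier", srcparam1[3]/srcparam2[3] carry kx/ky in the integer part and the
# phase offset / modulation depth in the fractional part, as decoded above.
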
def _process_planar_source(
    cfg: dict,
    srcpos: np.ndarray,
    srcdir: np.ndarray,
    srcparam1: np.ndarray,
    srcparam2: np.ndarray,
    srctype: str,
    srcpattern: Optional[np.ndarray],
    z0: float,
) -> Tuple[
    np.ndarray, int, np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray], bool
]:
    """Process planar/pattern/fourier source geometry."""

    # Define rectangular source polygon (5 points, closed)
    ps = np.array(
        [
            srcpos,
            srcpos + srcparam1[:3],
            srcpos + srcparam1[:3] + srcparam2[:3],
            srcpos + srcparam2[:3],
            srcpos,
        ]
    )

    node = cfg["node"]
    face = cfg["face"]  # 1-based

    iscolimated = cfg.get("iscolimated", True)
    nodeid = None

    if iscolimated:
        # Create sunk plane using qmeshcut
        sinkplane = np.zeros(4)
        sinkplane[:3] = srcdir
        sinkplane[3] = -np.dot(srcdir, srcpos + srcdir * z0)

        elem = cfg["elem"][:, :4].astype(int)  # 1-based
        nodevals = np.zeros(node.shape[0])

        cutpos, cutvalue, facedata, elemid, nodeid = i2m.qmeshcut(
            elem, node, nodevals, sinkplane
        )

        pnode = cutpos
        # facedata: when col[2] == col[3], it's a triangle; otherwise quad
        tri_mask = facedata[:, 2] == facedata[:, 3]
        pface = facedata[tri_mask, :3].astype(int)  # 1-based

        quad_idx = np.where(~tri_mask)[0]
        if len(quad_idx) > 0:
            quad_tri1 = facedata[quad_idx][:, [0, 1, 2]].astype(int)
            quad_tri2 = facedata[quad_idx][:, [0, 2, 3]].astype(int)
            pface = np.vstack([pface, quad_tri1, quad_tri2])

        parea = i2m.elemvolume(pnode, pface)  # 1-based face
        used_sinkplane = True
    else:
        pnode = node
        pface = face
        parea = cfg["area"]
        used_sinkplane = False

    # Compute face centroids using iso2mesh (1-based)
    c0 = i2m.meshcentroid(pnode, pface)

    # Rotate to align srcdir with z-axis using iso2mesh
    all_pts = np.vstack([c0, ps])
    newnode = i2m.rotatevec3d(all_pts, srcdir[:3])

    srcpoly = newnode[-5:, :2]  # Last 5 points are polygon
    centroids_2d = newnode[:-5, :2]

    # Test which centroids are inside the source polygon
    isin = _inpolygon(
        centroids_2d[:, 0], centroids_2d[:, 1], srcpoly[:, 0], srcpoly[:, 1]
    )
    idx = np.where(isin)[0]

    if len(idx) == 0:
        raise ValueError("Source direction does not intersect with the domain")

    # Check face orientations - convert to 0-based for indexing
    pface_0 = pface - 1
    AB = pnode[pface_0[idx, 1], :] - pnode[pface_0[idx, 0], :]
    AC = pnode[pface_0[idx, 2], :] - pnode[pface_0[idx, 0], :]
    N = np.cross(AB, AC)

    dir_dot = np.sum(N * srcdir, axis=1)

    if used_sinkplane:
        dir_dot[dir_dot > 0] = -dir_dot[dir_dot > 0]

    if np.all(dir_dot >= 0):
        raise ValueError("Please reorient the surface triangles")

    valid_mask = dir_dot < 0
    valid_idx = idx[valid_mask]

    # Initialize boundary condition array
    srcbc = np.zeros((1, len(pface)))
    srcbc[0, valid_idx] = 1.0

    # Compute normalized coordinates for pattern lookup
    pbc = centroids_2d[valid_idx, :]
    dp = pbc - srcpoly[0, :]
    dx = srcpoly[1, :] - srcpoly[0, :]
    dy = srcpoly[3, :] - srcpoly[0, :]
    nx = dx / np.linalg.norm(dx)
    ny = dy / np.linalg.norm(dy)

    bary = np.column_stack(
        [
            np.sum(dp * nx, axis=1) / np.linalg.norm(dx),
            np.sum(dp * ny, axis=1) / np.linalg.norm(dy),
        ]
    )
    bary = np.clip(bary, 0, 1 - 1e-6)

    patsize = 1

    if srcpattern is not None and srctype == "pattern":
        if srcpattern.ndim == 2:
            srcpattern = srcpattern[:, :, np.newaxis]

        pdim = srcpattern.shape
        patsize = pdim[2] if len(pdim) > 2 else 1

        srcbc = np.zeros((patsize, len(pface)))

        for i in range(patsize):
            pat = srcpattern[:, :, i] if patsize > 1 else srcpattern[:, :, 0]
            ix = np.clip((bary[:, 0] * pdim[1]).astype(int), 0, pdim[1] - 1)
            iy = np.clip((bary[:, 1] * pdim[0]).astype(int), 0, pdim[0] - 1)
            srcbc[i, valid_idx] = pat[iy, ix]

    elif srctype == "fourier":
        kx = int(srcparam1[3])
        ky = int(srcparam2[3])
        phi0 = (srcparam1[3] - kx) * 2 * np.pi
        M = 1 - (srcparam2[3] - ky)

        patsize = kx * ky
        srcbc = np.zeros((patsize, len(pface)))

        for i in range(kx):
            for j in range(ky):
                pattern_idx = i * ky + j
                srcbc[pattern_idx, valid_idx] = 0.5 * (
                    1 + M * np.cos((i * bary[:, 0] + j * bary[:, 1]) * 2 * np.pi + phi0)
                )

    return srcbc, patsize, pface, parea, pnode, nodeid, used_sinkplane

def _apply_bc_weighting(
    cfg: dict,
    srcbc: np.ndarray,
    Reff: float,
    srcweight: Optional[np.ndarray],
    pface: np.ndarray,
    parea: np.ndarray,
    pnode: np.ndarray,
    nodeid: Optional[np.ndarray],
    used_sinkplane: bool,
) -> np.ndarray:
    """Apply boundary condition weighting to convert flux to nodal values."""

    nn = cfg["node"].shape[0]
    npattern = srcbc.shape[0]

    # Boundary condition coefficient: 1/18 = 1/2 * 1/9
    Adiagbc = parea * ((1 - Reff) / (18 * (1 + Reff)))
    Adiagbc_weighted = Adiagbc[:, np.newaxis] * srcbc.T  # (Nface, Npattern)

    rhs = np.zeros((nn, npattern))
    pface_0 = pface - 1  # Convert to 0-based

    if nodeid is not None and used_sinkplane:
        # nodeid from qmeshcut: [node1_idx, node2_idx, weight]
        # node indices are 1-based, weight SHOULD be in [0,1]
        # but qmeshcut bug adds +1, so weights are in [1,2] - subtract 1 to fix
        nodeweight = nodeid[:, 2] - 1.0 if nodeid.shape[1] > 2 else np.ones(len(nodeid))
        node_ids = nodeid[:, :2].astype(int) - 1  # Convert to 0-based

        for i in range(npattern):
            for j in range(3):
                face_node_idx = pface_0[:, j]
                face_nodes_1 = node_ids[face_node_idx, 0]
                face_nodes_2 = node_ids[face_node_idx, 1]
                weights_1 = nodeweight[face_node_idx]
                weights_2 = 1 - weights_1

                np.add.at(rhs[:, i], face_nodes_1, Adiagbc_weighted[:, i] * weights_1)
                np.add.at(rhs[:, i], face_nodes_2, Adiagbc_weighted[:, i] * weights_2)
    else:
        for i in range(npattern):
            for j in range(3):
                np.add.at(rhs[:, i], pface_0[:, j], Adiagbc_weighted[:, i])

    # Normalize each pattern
    for i in range(npattern):
        wsrc = 1.0
        if srcweight is not None:
            if isinstance(srcweight, (int, float)):
                wsrc = srcweight
            elif len(srcweight) == npattern:
                wsrc = srcweight[i]

        norm = np.sum(np.abs(rhs[:, i]))
        if norm > 0:
            rhs[:, i] = rhs[:, i] * (wsrc / norm)

    return rhs.T  # Return (Npattern, Nn)

def _inpolygon(
    x: np.ndarray, y: np.ndarray, px: np.ndarray, py: np.ndarray
) -> np.ndarray:
    """
    Test if points (x, y) are inside polygon defined by (px, py).

    Uses matplotlib Path if available (faster), otherwise ray casting.
    """
    points = np.column_stack([x, y])
    polygon = np.column_stack([px, py])

    if HAS_MPL_PATH:
        # Use matplotlib's optimized path contains
        path = MplPath(polygon)
        return path.contains_points(points)

    # Fallback: vectorized ray casting
    n = len(px) - 1  # Last point same as first
    inside = np.zeros(len(x), dtype=bool)

    for i in range(n):
        x1, y1 = px[i], py[i]
        x2, y2 = px[i + 1], py[i + 1]

        cond1 = ((y1 <= y) & (y < y2)) | ((y2 <= y) & (y < y1))

        if np.any(cond1):
            x_intersect = (x2 - x1) * (y[cond1] - y1) / (y2 - y1 + 1e-30) + x1
            inside[cond1] ^= x[cond1] < x_intersect

    return inside

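A quick check of the _inpolygon() helper (illustrative sketch, not from utility.py): interior points of a closed unit square test True and exterior points False, whether the matplotlib fast path or the ray-casting fallback runs.

import numpy as np
from redbirdpy.utility import _inpolygon

# Closed square polygon: last vertex repeats the first, as the callers above construct it
px = np.array([0.0, 1.0, 1.0, 0.0, 0.0])
py = np.array([0.0, 0.0, 1.0, 1.0, 0.0])

x = np.array([0.5, 1.5])
y = np.array([0.5, 0.5])
print(_inpolygon(x, y, px, py))   # expected: [ True False]
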
# ============== Source/Detector Mapping Functions ==============


def sdmap(cfg: dict, maxdist: float = np.inf, **kwargs) -> Union[np.ndarray, dict]:
    """
    Create source-detector mapping table.

    Returns
    -------
    sd : ndarray or dict
        Mapping table with columns [src_col, det_col, active_flag, mode]
        - src_col: Column index in phi matrix for this source (0-based)
        - det_col: Column index in phi matrix for this detector (0-based)
        - active_flag: 1 if pair is active, 0 otherwise
        - mode: Measurement mode

    Column layout in RHS/phi:
        [0:srcnum] = point sources
        [srcnum:srcnum+wfsrcnum] = wide-field sources
        [srcnum+wfsrcnum:srcnum+wfsrcnum+detnum] = point detectors
        [srcnum+wfsrcnum+detnum:end] = wide-field detectors
    """
    # Get counts
    srcnum = 0
    if "srcpos" in cfg and cfg["srcpos"] is not None:
        srcpos = np.atleast_2d(cfg["srcpos"])
        if srcpos.size > 0:
            srcnum = srcpos.shape[0]

    detnum = 0
    if "detpos" in cfg and cfg["detpos"] is not None:
        detpos = np.atleast_2d(cfg["detpos"])
        if detpos.size > 0:
            detnum = detpos.shape[0]

    # widesrc/widedet stored as (Nn x Npattern)
    wfsrcnum = 0
    if "widesrc" in cfg and cfg["widesrc"] is not None and cfg["widesrc"].size > 0:
        wfsrcnum = cfg["widesrc"].shape[1]

    wfdetnum = 0
    if "widedet" in cfg and cfg["widedet"] is not None and cfg["widedet"].size > 0:
        wfdetnum = cfg["widedet"].shape[1]

    if (srcnum + wfsrcnum) == 0 or (detnum + wfdetnum) == 0:
        raise ValueError("Must define at least one source and detector")

    badsrc = kwargs.get("excludesrc", cfg.get("excludesrc", []))
    baddet = kwargs.get("excludedet", cfg.get("excludedet", []))

    # Good point sources/detectors (0-based indices)
    goodsrc = sorted(set(range(srcnum)) - set(badsrc))
    gooddet = sorted(set(range(detnum)) - set(baddet))

    # Wide-field source/detector indices (offset by point source/detector count)
    goodwfsrc = list(range(srcnum, srcnum + wfsrcnum))
    goodwfdet = list(range(detnum, detnum + wfdetnum))

    # Detector column offset in RHS matrix
    det_offset = srcnum + wfsrcnum

    if isinstance(cfg.get("prop"), dict):
        wavelengths = list(cfg["prop"].keys())
        sd = {}

        for wv in wavelengths:
            # All source indices (point + wide)
            all_src = goodsrc + goodwfsrc
            # All detector indices (point + wide), offset for RHS column
            all_det = [det_offset + d for d in gooddet] + [
                det_offset + d for d in goodwfdet
            ]

            ss, dd = np.meshgrid(all_src, all_det)
            sdwv = np.column_stack([ss.flatten(), dd.flatten()])

            active = np.ones(len(sdwv))

            # Mark bad pairs as inactive (only for point sources/detectors)
            for i in range(len(sdwv)):
                si = int(sdwv[i, 0])
                di = int(sdwv[i, 1]) - det_offset
                if si < srcnum and di < detnum:
                    if si in badsrc or di in baddet:
                        active[i] = 0

            # Filter by max distance if specified (only for point sources/detectors)
            if maxdist < np.inf and srcnum > 0 and detnum > 0:
                dist = getdistance(cfg["srcpos"], cfg["detpos"], badsrc, baddet)
                for i in range(len(sdwv)):
                    si = int(sdwv[i, 0])
                    di = int(sdwv[i, 1]) - det_offset
                    if si < srcnum and di < detnum:
                        if dist[di, si] >= maxdist:
                            active[i] = 0

            sdwv = np.column_stack([sdwv, active, np.ones(len(sdwv))])
            sd[wv] = sdwv

        return sd
    else:
        # Single wavelength
        all_src = goodsrc + goodwfsrc
        all_det = [det_offset + d for d in gooddet] + [
            det_offset + d for d in goodwfdet
        ]

        ss, dd = np.meshgrid(all_src, all_det)
        sd = np.column_stack([ss.flatten(), dd.flatten()])

        if maxdist < np.inf and srcnum > 0 and detnum > 0:
            dist = getdistance(cfg["srcpos"], cfg["detpos"], badsrc, baddet)
            active = []
            for s, d in sd:
                si = int(s)
                di = int(d) - det_offset
                if si < srcnum and di < detnum:
                    active.append(1.0 if dist[di, si] < maxdist else 0.0)
                else:
                    active.append(1.0)  # Wide-field always active
            sd = np.column_stack([sd, np.array(active)])
        else:
            sd = np.column_stack([sd, np.ones(len(sd))])

        return sd

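A small sdmap() sketch (illustrative, not from utility.py), using bare optode lists and no mesh; with a single-wavelength prop array it returns one row per source-detector pair with the RHS column indices described in the docstring.

import numpy as np
from redbirdpy.utility import sdmap

cfg = {
    "srcpos": np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]]),
    "detpos": np.array([[0.0, 10.0, 0.0], [5.0, 10.0, 0.0], [10.0, 10.0, 0.0]]),
    "prop": np.array([[0.0, 0.0, 1.0, 1.0], [0.008, 1.0, 0.01, 1.37]]),
}

sd = sdmap(cfg)
print(sd.shape)   # (6, 3): [src_col, det_col, active]; detector columns start at 2 here
print(sd[:3])
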
def getoptodes(
    cfg: dict, wv: str = ""
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Get combined optode positions with inward displacement by 1/mu_tr."""
    ltr = getltr(cfg, wv)

    pointsrc = None
    pointdet = None
    widesrc = cfg.get("widesrc", None)
    widedet = cfg.get("widedet", None)

    if "srcpos" in cfg and cfg["srcpos"] is not None and cfg["srcpos"].size > 0:
        srcdir = cfg["srcdir"]
        if srcdir.shape[0] == 1:
            srcdir = np.tile(srcdir, (cfg["srcpos"].shape[0], 1))
        pointsrc = cfg["srcpos"] + srcdir * ltr

    if "detpos" in cfg and cfg["detpos"] is not None and cfg["detpos"].size > 0:
        detdir = cfg.get("detdir", cfg["srcdir"])
        if detdir.shape[0] == 1:
            detdir = np.tile(detdir, (cfg["detpos"].shape[0], 1))
        pointdet = cfg["detpos"] + detdir * ltr

    return pointsrc, pointdet, widesrc, widedet

def getdistance(
    srcpos: np.ndarray,
    detpos: np.ndarray,
    badsrc: List[int] = None,
    baddet: List[int] = None,
    widesrc: np.ndarray = None,
    widedet: np.ndarray = None,
) -> np.ndarray:
    """Calculate source-detector distances. Returns (Ndet x Nsrc) matrix."""
    badsrc = badsrc or []
    baddet = baddet or []

    srcnum = srcpos.shape[0]
    detnum = detpos.shape[0]
    widesrcnum = widesrc.shape[0] if widesrc is not None else 0
    widedetnum = widedet.shape[0] if widedet is not None else 0

    total_src = srcnum + widesrcnum
    total_det = detnum + widedetnum

    dist = np.full((total_det, total_src), np.inf)

    goodsrc = sorted(set(range(srcnum)) - set(badsrc))
    gooddet = sorted(set(range(detnum)) - set(baddet))

    if len(goodsrc) > 0 and len(gooddet) > 0:
        src_good = srcpos[goodsrc, :3]
        det_good = detpos[gooddet, :3]

        diff = src_good[np.newaxis, :, :] - det_good[:, np.newaxis, :]
        d = np.sqrt(np.sum(diff**2, axis=2))

        dist[np.ix_(gooddet, goodsrc)] = d

    return dist

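getdistance() usage sketch (illustrative, not from utility.py): the returned matrix is detector-major, so dist[d, s] is the separation between detector d and source s, with excluded optodes left at inf.

import numpy as np
from redbirdpy.utility import getdistance

srcpos = np.array([[0.0, 0.0, 0.0], [30.0, 0.0, 0.0]])
detpos = np.array([[0.0, 40.0, 0.0]])

print(getdistance(srcpos, detpos))               # [[40. 50.]]
print(getdistance(srcpos, detpos, badsrc=[1]))   # second column stays inf
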
def getltr(cfg: dict, wv: str = "") -> float:
    """Calculate transport mean free path l_tr = 1/(mua + musp)."""
    from . import property as prop_module

    bkprop = prop_module.getbulk(cfg)

    if isinstance(bkprop, dict):
        if not wv:
            wv = list(bkprop.keys())[0]
        bkprop = bkprop[wv]

    mua = bkprop[0]
    musp = bkprop[1] * (1 - bkprop[2])

    return 1.0 / (mua + musp)


def getreff(n_in: float, n_out: float = 1.0) -> float:
    """Calculate effective reflection coefficient (Haskell 1994)."""
    if n_in <= n_out:
        return 0.0

    oc = np.arcsin(n_out / n_in)
    ostep = np.pi / 2000

    o = np.arange(0, oc, ostep)

    cosop = np.sqrt(1 - (n_in * np.sin(o)) ** 2)
    coso = np.cos(o)

    r_fres = 0.5 * ((n_in * cosop - n_out * coso) / (n_in * cosop + n_out * coso)) ** 2
    r_fres += 0.5 * ((n_in * coso - n_out * cosop) / (n_in * coso + n_out * cosop)) ** 2

    o_full = np.arange(0, np.pi / 2, ostep)
    r_fres_full = np.ones(len(o_full))
    r_fres_full[: len(r_fres)] = r_fres

    coso_full = np.cos(o_full)

    r_phi = 2 * np.sum(np.sin(o_full) * coso_full * r_fres_full) * ostep
    r_j = 3 * np.sum(np.sin(o_full) * coso_full**2 * r_fres_full) * ostep

    return (r_phi + r_j) / (2 - r_phi + r_j)

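For a tissue-like refractive index the Haskell-style effective reflection coefficient evaluates to roughly 0.49; the sketch below (illustrative, not from utility.py) shows the call, together with the transport mean free path for assumed bulk properties mua=0.008/mm, mus=1.0/mm, g=0.01.

from redbirdpy.utility import getreff

Reff = getreff(1.37, 1.0)        # internal / external refractive index
print(round(Reff, 3))            # approximately 0.49 for n_in = 1.37

# Transport mean free path for the assumed bulk optical properties (1/mm units):
mua, mus, g = 0.008, 1.0, 0.01
ltr = 1.0 / (mua + mus * (1 - g))   # same formula getltr() applies to the bulk property row
print(ltr)                           # roughly 1.0 mm
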
# ============== Data Manipulation Functions ==============


def elem2node(elem: np.ndarray, elemval: np.ndarray, nodelen: int = None) -> np.ndarray:
    """Interpolate element-based values to nodes."""
    if isinstance(elem, dict):
        nodelen = elem["node"].shape[0]
        elem = elem["elem"]

    elem_0 = elem[:, :4].astype(int) - 1
    nval = elemval.shape[1] if elemval.ndim > 1 else 1

    if elemval.ndim == 1:
        elemval = elemval[:, np.newaxis]

    nodeval = np.zeros((nodelen, nval))

    for j in range(4):
        np.add.at(nodeval, elem_0[:, j], elemval)

    nodeval *= 0.25

    return nodeval.squeeze()

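elem2node() averaging sketch (illustrative, not from utility.py): each node receives one quarter of the value of every element attached to it, so a lone tetrahedron carrying the value 8 yields 2.0 at each of its four nodes.

import numpy as np
from redbirdpy.utility import elem2node

elem = np.array([[1, 2, 3, 4]])       # 1-based connectivity
elemval = np.array([8.0])             # one value per element
print(elem2node(elem, elemval, nodelen=4))   # [2. 2. 2. 2.]
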
def addnoise(
    data: np.ndarray,
    snrshot: float,
    snrthermal: float = np.inf,
    randseed: int = 123456789,
) -> np.ndarray:
    """Add simulated shot and thermal noise to data."""
    np.random.seed(randseed)

    if np.isinf(snrshot) and np.isinf(snrthermal):
        warnings.warn("No noise added")
        return data.copy()

    datanorm = np.abs(data)
    max_amp = np.max(datanorm)

    sigma_shot = 10 ** (-np.real(snrshot) / 20)
    sigma_thermal = max_amp * 10 ** (-np.real(snrthermal) / 20)

    if np.isreal(data).all():
        newdata = (
            data + np.sqrt(np.abs(data)) * np.random.randn(*data.shape) * sigma_shot
        )
        newdata += np.random.randn(*data.shape) * sigma_thermal
    else:
        sigma_shot_phase = 10 ** (-np.imag(snrshot) / 20)
        sigma_thermal_phase = 10 ** (-np.imag(snrthermal) / 20)

        amp_shot = np.random.randn(*data.shape) * sigma_shot
        phase_shot = np.random.randn(*data.shape) * sigma_shot_phase * 2 * np.pi
        amp_thermal = np.random.randn(*data.shape) * sigma_thermal
        phase_thermal = np.random.randn(*data.shape) * sigma_thermal_phase * 2 * np.pi

        shot_noise = np.sqrt(np.abs(data)) * (amp_shot * np.exp(1j * phase_shot))
        thermal_noise = amp_thermal * np.exp(1j * phase_thermal)

        newdata = data + shot_noise + thermal_noise

    return newdata

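addnoise() sketch (illustrative, not from utility.py): SNR values are given in dB, and a complex-valued snrshot packs the amplitude SNR in its real part and the phase SNR in its imaginary part for frequency-domain data, mirroring how the function splits them above.

import numpy as np
from redbirdpy.utility import addnoise

clean = np.full((4, 5), 1e-3)                 # CW amplitudes
noisy = addnoise(clean, snrshot=40.0, snrthermal=120.0)
print(np.max(np.abs(noisy - clean)) > 0)      # True; the default randseed makes the noise reproducible

# Frequency-domain (complex) data: 40 dB amplitude SNR, 30 dB phase SNR for shot noise
fd = clean * np.exp(1j * 0.3)
noisy_fd = addnoise(fd, snrshot=40.0 + 30.0j)
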
def meshinterp(fromval, elemid, elembary, fromelem, toval=None):
    """Interpolate nodal values from source mesh to target mesh."""

    if fromval.ndim == 1:
        fromval = fromval[:, np.newaxis]

    elem_0 = fromelem[:, :4].astype(int) - 1
    npts = len(elemid)
    ncol = fromval.shape[1]

    if toval is None:
        newval = np.zeros((npts, ncol))
    else:
        newval = toval.copy() if toval.ndim > 1 else toval[:, np.newaxis].copy()

    valid = ~np.isnan(elemid)
    valid_idx = np.where(valid)[0]
    valid_eid = elemid[valid].astype(int) - 1
    valid_bary = elembary[valid]

    node_ids = elem_0[valid_eid]
    vals_at_nodes = fromval[node_ids]
    interp_vals = np.sum(vals_at_nodes * valid_bary[:, :, np.newaxis], axis=1)

    newval[valid_idx] = interp_vals

    return newval if newval.shape[1] > 1 else newval.squeeze()

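meshinterp() sketch (illustrative, not from utility.py): elemid/elembary are normally produced by a point-location step on the source mesh; here they are written out by hand for a single enclosing tetrahedron, with 1-based elemid, NaN marking a point outside the mesh, and barycentric weights that sum to one.

import numpy as np
from redbirdpy.utility import meshinterp

fromelem = np.array([[1, 2, 3, 4]])            # source mesh: one tetrahedron (1-based)
fromval = np.array([0.0, 1.0, 2.0, 3.0])       # nodal values on the source mesh

elemid = np.array([1.0, np.nan])               # enclosing element per target point (NaN = outside)
elembary = np.array([[0.25, 0.25, 0.25, 0.25],
                     [0.0, 0.0, 0.0, 0.0]])    # barycentric coordinates per target point

print(meshinterp(fromval, elemid, elembary, fromelem))   # [1.5 0. ]
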
# ============== Private Helper Functions ==============


def _nodevolume(node: np.ndarray, elem: np.ndarray, evol: np.ndarray) -> np.ndarray:
    """Compute nodal volumes (1/4 of connected element volumes)."""
    elem_0 = elem[:, :4].astype(int) - 1
    nn = node.shape[0]

    nvol = np.zeros(nn)

    for j in range(4):
        np.add.at(nvol, elem_0[:, j], evol)

    nvol *= 0.25

    return nvol


def _femnz(elem: np.ndarray, nn: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Get sparse matrix non-zero indices for FEM assembly."""
    elem_0 = elem[:, :4].astype(int) - 1

    conn = [set() for _ in range(nn)]

    for e in elem_0:
        for i in range(4):
            for j in range(4):
                if i != j:
                    conn[e[i]].add(e[j])

    connnum = np.array([len(c) for c in conn])

    rows = []
    cols = []
    for i in range(nn):
        for j in conn[i]:
            rows.append(i)
            cols.append(j)

    return np.array(rows), np.array(cols), connnum

# ============== Fallback implementations ==============


def _meshreorient_fallback(node: np.ndarray, elem: np.ndarray) -> np.ndarray:
    """Reorient elements to have positive volume. elem is 1-based."""
    elem = elem.copy()
    elem_0 = elem[:, :4].astype(int) - 1

    for i in range(elem.shape[0]):
        n = node[elem_0[i, :], :]

        v1 = n[1] - n[0]
        v2 = n[2] - n[0]
        v3 = n[3] - n[0]
        vol = np.dot(np.cross(v1, v2), v3)

        if vol < 0:
            elem[i, [0, 1]] = elem[i, [1, 0]]

    return elem


def _volface_fallback(elem: np.ndarray) -> np.ndarray:
    """Extract surface triangles from tetrahedral mesh. elem is 1-based."""
    elem_0 = elem[:, :4].astype(int) - 1

    faces_0 = np.vstack(
        [
            elem_0[:, [0, 2, 1]],
            elem_0[:, [0, 1, 3]],
            elem_0[:, [0, 3, 2]],
            elem_0[:, [1, 2, 3]],
        ]
    )

    faces_sorted = np.sort(faces_0, axis=1)

    _, indices, counts = np.unique(
        faces_sorted, axis=0, return_index=True, return_counts=True
    )

    boundary_idx = indices[counts == 1]

    return faces_0[boundary_idx] + 1


def _elemvolume_fallback(node: np.ndarray, elem: np.ndarray) -> np.ndarray:
    """Compute element volumes or areas. elem is 1-based."""
    elem_0 = elem.astype(int) - 1

    if elem.shape[1] >= 4:
        n0 = node[elem_0[:, 0], :]
        v1 = node[elem_0[:, 1], :] - n0
        v2 = node[elem_0[:, 2], :] - n0
        v3 = node[elem_0[:, 3], :] - n0

        vol = np.abs(np.sum(np.cross(v1, v2) * v3, axis=1)) / 6.0
    elif elem.shape[1] == 3:
        v1 = node[elem_0[:, 1], :] - node[elem_0[:, 0], :]
        v2 = node[elem_0[:, 2], :] - node[elem_0[:, 0], :]

        vol = 0.5 * np.sqrt(np.sum(np.cross(v1, v2) ** 2, axis=1))
    else:
        raise ValueError(f"Unsupported element type with {elem.shape[1]} nodes")

    return vol


def forcearray(cfg: dict, keys: List[str]) -> dict:
    """Convert list-valued cfg entries to numpy arrays."""
    for key in keys:
        if key in cfg and isinstance(cfg[key], list):
            cfg[key] = np.array(cfg[key])
    return cfg
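The fallbacks can be exercised directly (illustrative sketch, not from utility.py): for a single unit tetrahedron, _volface_fallback() returns all four boundary triangles with 1-based indices and _elemvolume_fallback() returns the volume 1/6.

import numpy as np
from redbirdpy.utility import _volface_fallback, _elemvolume_fallback

node = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
elem = np.array([[1, 2, 3, 4]])

print(_volface_fallback(elem))           # four 1-based surface triangles
print(_elemvolume_fallback(node, elem))  # [0.16666667]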