isoview 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isoview/__init__.py +36 -0
- isoview/array.py +11 -0
- isoview/config.py +213 -0
- isoview/corrections.py +135 -0
- isoview/fusion.py +979 -0
- isoview/intensity.py +427 -0
- isoview/io.py +942 -0
- isoview/masks.py +421 -0
- isoview/pipeline.py +913 -0
- isoview/segmentation.py +173 -0
- isoview/temporal.py +373 -0
- isoview/transforms.py +1115 -0
- isoview/viz.py +723 -0
- isoview-0.1.0.dist-info/METADATA +370 -0
- isoview-0.1.0.dist-info/RECORD +17 -0
- isoview-0.1.0.dist-info/WHEEL +4 -0
- isoview-0.1.0.dist-info/entry_points.txt +2 -0
isoview/segmentation.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""Foreground segmentation for microscopy data."""
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from scipy.ndimage import convolve1d
|
|
5
|
+
|
|
6
|
+
from .corrections import percentile_interp
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _make_gaussian_kernel(sigma: float, size: int) -> np.ndarray:
|
|
10
|
+
"""Create 1D Gaussian kernel."""
|
|
11
|
+
half = int(np.ceil(size / 2))
|
|
12
|
+
x = np.arange(-half, half + 1)
|
|
13
|
+
kernel = np.exp(-(x ** 2) / (2 * sigma ** 2))
|
|
14
|
+
return kernel / kernel.sum()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _imgaussian_anisotropy(volume: np.ndarray, sigma: np.ndarray, size: np.ndarray) -> np.ndarray:
    """Smooth a 3D volume with a separable anisotropic Gaussian.

    One 1D convolution is applied per axis in (z, y, x) order, each with
    its own sigma and kernel size, using edge replication ('nearest') at
    the borders.
    """
    smoothed = volume
    for axis in range(3):
        kernel = _make_gaussian_kernel(sigma[axis], size[axis])
        smoothed = convolve1d(smoothed, kernel, axis=axis, mode='nearest')
    return smoothed
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def segment_foreground(
    volume: np.ndarray,
    kernel_sigma: float,
    kernel_size: int,
    threshold: float,
    mask_percentile: float,
    scaling: float,
    splitting: int = 10,
    subsample_factor: int = 100,
    verbose: bool = False,
) -> np.ndarray:
    """Generate binary foreground mask using anisotropic gaussian filtering.

    The volume is low-pass filtered with a 3D Gaussian whose z extent is
    shrunk by ``scaling`` (the z/xy pixel-spacing ratio), then thresholded
    at an adaptive level between a percentile-based minimum intensity and
    the mean intensity of voxels above that minimum.

    Args:
        volume: 3d image stack (z, y, x)
        kernel_sigma: gaussian sigma in xy
        kernel_size: gaussian kernel size in xy
        threshold: adaptive threshold (0-1); 0 puts the cut at the
            percentile minimum, 1 at the above-minimum mean
        mask_percentile: percentile for minimum intensity
        scaling: z/xy pixel spacing ratio
        splitting: number of slabs for memory efficiency
        subsample_factor: subsampling for percentile calculation
        verbose: print statistics

    Returns:
        binary mask (uint16: 0 or 1)
    """
    # Shrink the z kernel by the anisotropy factor, but never below one voxel.
    sigma_z = max(1.0, kernel_sigma / scaling)
    size_z = max(1, int(kernel_size / scaling))
    sigma = np.array([sigma_z, kernel_sigma, kernel_sigma])
    size = np.array([size_z, kernel_size, kernel_size])

    z_dim, y_dim, x_dim = volume.shape
    filtered = np.zeros_like(volume, dtype=np.uint16)
    # Extra rows read on each side of a slab so filtering near a slab
    # boundary sees the same neighborhood as a whole-volume convolution.
    margin = 2 * kernel_size

    # Filter slab-by-slab along y to bound peak memory use.
    for i in range(splitting):
        start = max(0, round((i / splitting) * y_dim) - margin)
        stop = min(y_dim, round(((i + 1) / splitting) * y_dim) + margin)

        slab = volume[:, start:stop, :].astype(np.float64)
        conv = _imgaussian_anisotropy(slab, sigma, size)

        # Write back only the slab interior (the margin rows are discarded),
        # except at the first/last slab where the volume edge itself is kept.
        if i == 0:
            filtered[:, :stop - margin, :] = np.round(conv[:, : stop - margin - start, :]).astype(np.uint16)
        elif i == splitting - 1:
            filtered[:, start + margin:, :] = np.round(conv[:, margin:, :]).astype(np.uint16)
        else:
            inner_start = margin
            inner_stop = stop - start - margin
            filtered[:, start + margin : stop - margin, :] = np.round(conv[:, inner_start:inner_stop, :]).astype(np.uint16)

    # Background level estimated from a strided subsample of the filtered volume.
    min_intensity = percentile_interp(filtered.ravel()[::subsample_factor], mask_percentile)

    # Mean intensity of candidate-foreground voxels, again slab-by-slab.
    total_sum = 0.0
    total_count = 0
    for i in range(splitting):
        start = round((i / splitting) * y_dim)
        stop = round(((i + 1) / splitting) * y_dim)
        slab = filtered[:, start:stop, :]
        above_min = slab[slab > min_intensity]
        total_sum += float(above_min.sum())
        total_count += above_min.size

    # If nothing exceeds the minimum, fall back so that level == min_intensity.
    mean_intensity = total_sum / total_count if total_count > 0 else min_intensity
    level = min_intensity + (mean_intensity - min_intensity) * threshold

    mask = (filtered > level).astype(np.uint16)

    if verbose:
        fg_pct = 100 * mask.sum() / mask.size
        print(f"foreground: {fg_pct:.1f}%, min={min_intensity:.2f}, mean={mean_intensity:.2f}, level={level:.2f}")

    return mask
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def fuse_masks(masks: list[np.ndarray]) -> np.ndarray:
    """Combine binary masks with an element-wise logical OR.

    Args:
        masks: non-empty list of binary masks of identical shape

    Returns:
        uint16 mask that is 1 wherever any input mask is nonzero

    Raises:
        ValueError: if ``masks`` is empty
    """
    if len(masks) == 0:
        raise ValueError("no masks to fuse")

    combined = np.zeros(masks[0].shape, dtype=bool)
    for current in masks:
        combined |= current.astype(bool)

    return combined.astype(np.uint16)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def create_coordinate_masks(
    mask: np.ndarray,
    splitting: int = 10,
) -> tuple[np.ndarray, np.ndarray]:
    """Generate coordinate masks for interpolation.

    Args:
        mask: binary foreground mask (z, y, x)
        splitting: number of slabs to split y-axis for memory-efficient processing

    Returns:
        xz_mask: shape (y, z), stores mean x coordinate at each (y, z) position
        xy_mask: shape (y, x), stores mean z coordinate at each (y, x) position

    Notes:
        Positions whose line of sight contains no foreground voxels are set
        to 0.  (Previously those positions produced NaN from ``nanmean``,
        and casting NaN to uint16 is undefined behavior in NumPy, besides
        emitting a RuntimeWarning.)
    """
    z_size, y_size, x_size = mask.shape

    xz_mask = np.zeros((y_size, z_size), dtype=np.uint16)
    xy_mask = np.zeros((y_size, x_size), dtype=np.uint16)

    # Process y-slabs one at a time to bound the size of the float volumes.
    for i in range(splitting):
        y_start = round((i / splitting) * y_size)
        y_stop = round(((i + 1) / splitting) * y_size)
        slab_height = y_stop - y_start
        background = mask[:, y_start:y_stop, :] == 0

        # xz_mask: (y, z) - mean x coordinate where the slab is foreground
        x_coords = np.arange(x_size)[np.newaxis, np.newaxis, :]
        coord_vol = np.broadcast_to(x_coords, (z_size, slab_height, x_size)).astype(np.float32)
        coord_vol = np.copy(coord_vol)
        coord_vol[background] = np.nan
        # average over x (axis=2) gives (z, slab_height); transpose to (slab_height, z)
        xz_mask[y_start:y_stop, :] = _rounded_nanmean_u16(coord_vol, axis=2).T

        # xy_mask: (y, x) - mean z coordinate where the slab is foreground
        z_coords = np.arange(z_size)[:, np.newaxis, np.newaxis]
        coord_vol = np.broadcast_to(z_coords, (z_size, slab_height, x_size)).astype(np.float32)
        coord_vol = np.copy(coord_vol)
        coord_vol[background] = np.nan
        # average over z (axis=0) gives (slab_height, x)
        xy_mask[y_start:y_stop, :] = _rounded_nanmean_u16(coord_vol, axis=0)

    return xz_mask, xy_mask


def _rounded_nanmean_u16(values: np.ndarray, axis: int) -> np.ndarray:
    """Rounded NaN-ignoring mean along ``axis``; all-NaN positions map to 0
    instead of going through an undefined NaN -> uint16 cast."""
    import warnings

    with warnings.catch_warnings():
        # nanmean warns ("Mean of empty slice") for all-NaN positions;
        # those positions are handled explicitly below.
        warnings.simplefilter("ignore", category=RuntimeWarning)
        means = np.nanmean(values, axis=axis)
    return np.where(np.isnan(means), 0.0, np.round(means)).astype(np.uint16)
|
isoview/temporal.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
"""Temporal parameter processing for multi-timepoint fusion."""
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
from typing import Optional
|
|
5
|
+
from scipy.signal import savgol_filter
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def smooth_parameters_rloess(
    timepoints: np.ndarray,
    parameters: np.ndarray,
    window_size: int = 100
) -> np.ndarray:
    """
    Apply robust locally-weighted scatterplot smoothing to parameters.

    Parameters
    ----------
    timepoints : ndarray
        Time point indices (N,); accepted for interface compatibility but
        not used by the current implementation (smoothing runs on sample
        order only).
    parameters : ndarray
        Parameter values (N,) or (N, M) for M parameters
    window_size : int, default=100
        Smoothing window size

    Returns
    -------
    ndarray
        Smoothed parameters, same shape as input

    Notes
    -----
    Uses a Savitzky-Golay filter (polyorder 2) as an approximation to
    rloess.  The window is clamped to the series length and forced odd;
    with fewer than 3 usable samples the input is returned unchanged.
    NaN entries are excluded from filtering and preserved in the output.
    """
    was_1d = parameters.ndim == 1
    if was_1d:
        parameters = parameters[:, np.newaxis]

    n_points, n_params = parameters.shape

    # Clamp the window to the series length and force it odd.
    window = min(window_size, n_points)
    if window % 2 == 0:
        window -= 1
    if window < 3:
        return parameters.squeeze() if was_1d else parameters

    smoothed = np.zeros_like(parameters)
    for col in range(n_params):
        series = parameters[:, col]
        finite = ~np.isnan(series)
        n_finite = int(finite.sum())
        if n_finite < window:
            # Too few measurements for this window; pass the column through.
            smoothed[:, col] = series
            continue

        # Filter only the finite samples, then restore NaNs in place.
        smoothed[finite, col] = savgol_filter(
            series[finite],
            window_length=min(window, n_finite),
            polyorder=2,
            mode='interp'
        )
        smoothed[~finite, col] = np.nan

    return smoothed.squeeze() if was_1d else smoothed
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def interpolate_missing_timepoints(
    full_interval: np.ndarray,
    measured_timepoints: np.ndarray,
    measured_parameters: np.ndarray
) -> np.ndarray:
    """
    Interpolate parameters for missing timepoints.

    Parameters
    ----------
    full_interval : ndarray
        All timepoints to fill (M,)
    measured_timepoints : ndarray
        Timepoints with measurements (N,)
    measured_parameters : ndarray
        Parameter values at measured timepoints (N,) or (N, P)

    Returns
    -------
    ndarray
        Parameters for all timepoints (M,) or (M, P)

    Notes
    -----
    Measured timepoints pass through unchanged.  Gaps with a measurement
    on both sides get distance-weighted linear interpolation between the
    two nearest neighbors; one-sided gaps take the nearest value.
    """
    was_1d = measured_parameters.ndim == 1
    if was_1d:
        measured_parameters = measured_parameters[:, np.newaxis]

    n_total = len(full_interval)
    n_params = measured_parameters.shape[1]
    filled = np.zeros((n_total, n_params))

    for row, tp in enumerate(full_interval):
        if tp in measured_timepoints:
            # Direct measurement available: copy it through.
            source = np.where(measured_timepoints == tp)[0][0]
            filled[row, :] = measured_parameters[source, :]
            continue

        deltas = measured_timepoints - tp
        later = np.where(deltas > 0)[0]
        earlier = np.where(deltas < 0)[0]

        if later.size and earlier.size:
            # Nearest measurement on each side of tp.
            upper = later[np.argmin(deltas[later])]
            lower = earlier[np.argmax(deltas[earlier])]

            gap_up = measured_timepoints[upper] - tp
            gap_down = tp - measured_timepoints[lower]
            span = gap_up + gap_down

            # Distance-weighted blend: the closer neighbor gets more weight.
            filled[row, :] = (
                (gap_down / span) * measured_parameters[upper, :]
                + (gap_up / span) * measured_parameters[lower, :]
            )
        else:
            # One-sided gap: fall back to the nearest measurement.
            filled[row, :] = measured_parameters[np.argmin(np.abs(deltas)), :]

    return filled.squeeze() if was_1d else filled
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def apply_temporal_averaging(
    parameters: np.ndarray,
    offset_range: int = 10,
    angle_range: int = 10,
    intensity_range: int = 5,
    method: str = "median",
    parameter_types: Optional[list[str]] = None
) -> np.ndarray:
    """
    Apply temporal averaging with different windows per parameter type.

    Parameters
    ----------
    parameters : ndarray
        Parameter matrix (T, P) for T timepoints and P parameters
    offset_range : int, default=10
        Half-window (in timepoints) for spatial offsets
    angle_range : int, default=10
        Half-window for rotation angles
    intensity_range : int, default=5
        Half-window for intensity correction
    method : str, default='median'
        Averaging method: 'median' or 'mean'
    parameter_types : list of str or None
        Type of each parameter: 'offset', 'angle', 'intensity', 'flag'.
        If None, assumes columns are up to three offsets, then an angle,
        an intensity factor, and trailing flags.

    Returns
    -------
    ndarray
        Averaged parameters (T, P)

    Raises
    ------
    ValueError
        If ``method`` is neither 'median' nor 'mean'.

    Notes
    -----
    Flag columns are copied verbatim, never averaged.  NaN entries are
    dropped from each window; a window with no valid samples yields NaN.
    """
    T, P = parameters.shape
    averaged = np.zeros_like(parameters)

    if parameter_types is None:
        # Conventional column layout: offsets, angle, intensity, flags.
        parameter_types = ['offset'] * min(3, P)
        if P > 3:
            parameter_types.append('angle')
        if P > 4:
            parameter_types.append('intensity')
        if P > 5:
            parameter_types.extend(['flag'] * (P - 5))

    half_window = {
        'offset': offset_range,
        'angle': angle_range,
        'intensity': intensity_range,
    }

    for col, ptype in enumerate(parameter_types):
        if ptype == 'flag':
            # Flags are discrete markers; copy without averaging.
            averaged[:, col] = parameters[:, col]
            continue

        half = half_window.get(ptype, offset_range)

        for t in range(T):
            window_values = parameters[max(0, t - half):min(T, t + half + 1), col]
            valid = window_values[~np.isnan(window_values)]

            if valid.size == 0:
                averaged[t, col] = np.nan
            elif method == 'median':
                averaged[t, col] = np.median(valid)
            elif method == 'mean':
                averaged[t, col] = np.mean(valid)
            else:
                raise ValueError(f"Unknown averaging method: {method}")

    return averaged
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def compute_static_parameters(
    parameters: np.ndarray,
    method: str = "mean"
) -> np.ndarray:
    """
    Compute static (time-averaged) parameters.

    Parameters
    ----------
    parameters : ndarray
        Parameter matrix (T, P)
    method : str, default='mean'
        Averaging method: 'mean' or 'median'

    Returns
    -------
    ndarray
        Static parameters (T, P) with the same value at every timepoint

    Raises
    ------
    ValueError
        If ``method`` is neither 'mean' nor 'median'.

    Notes
    -----
    Collapses the time axis with a NaN-ignoring average, then repeats the
    result for all timepoints.  Useful for datasets with minimal drift.
    """
    n_times, _n_params = parameters.shape

    if method == 'mean':
        collapsed = np.nanmean(parameters, axis=0)
    elif method == 'median':
        collapsed = np.nanmedian(parameters, axis=0)
    else:
        raise ValueError(f"Unknown method: {method}")

    # Repeat the per-parameter average for every timepoint.
    return np.tile(collapsed, (n_times, 1))
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def create_lookup_table(
    timepoints: np.ndarray,
    transformations: dict,
    intensity_corrections: dict,
    mode: str = 'camera'
) -> dict:
    """
    Create lookup table from transformation and intensity correction data.

    Parameters
    ----------
    timepoints : ndarray
        Time point indices
    transformations : dict
        Dict mapping timepoints to transformation dicts
    intensity_corrections : dict
        Dict mapping timepoints to intensity correction dicts
    mode : str, default='camera'
        Fusion mode: 'camera', 'channel', or 'full' (accepted for
        interface compatibility; not used by the current implementation)

    Returns
    -------
    dict
        Lookup table with one array per parameter, aligned with
        ``timepoints``.  Timepoints absent from the input dicts keep
        their defaults (offsets/rotation/correlations 0, factor 1,
        operation flag 0).
    """
    count = len(timepoints)

    table = {
        'timepoints': timepoints.copy(),
        'x_offset': np.zeros(count),
        'y_offset': np.zeros(count),
        'z_offset': np.zeros(count),
        'rotation': np.zeros(count),
        'correlation': np.zeros(count),
        'intensity_factor': np.ones(count),
        'intensity_operation': np.zeros(count, dtype=int),
        'intensity_correlation': np.zeros(count),
    }

    for row, tp in enumerate(timepoints):
        if tp in transformations:
            transform = transformations[tp]
            for key in ('x_offset', 'y_offset', 'z_offset', 'rotation', 'correlation'):
                table[key][row] = transform.get(key, 0.0)

        if tp in intensity_corrections:
            correction = intensity_corrections[tp]
            table['intensity_factor'][row] = correction.get('factor', 1.0)
            # Encode the operation as a flag: multiply -> 2, divide -> 1.
            table['intensity_operation'][row] = 2 if correction.get('operation', 'multiply') == 'multiply' else 1
            table['intensity_correlation'][row] = correction.get('overlap_correlation', 0.0)

    return table
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
def save_lookup_table(
    lookup_table: dict,
    output_path: str,
    format: str = 'npz'
) -> None:
    """
    Save lookup table to file.

    Parameters
    ----------
    lookup_table : dict
        Lookup table from create_lookup_table
    output_path : str
        Output file path
    format : str, default='npz'
        File format: 'npz' or 'csv'

    Raises
    ------
    ValueError
        If ``format`` is neither 'npz' nor 'csv'.

    Notes
    -----
    NPZ preserves dtypes and is compact; CSV is human-readable but may
    lose precision.
    """
    if format == 'npz':
        np.savez(output_path, **lookup_table)
        return
    if format == 'csv':
        # Local import keeps pandas optional for npz-only users.
        import pandas as pd
        pd.DataFrame(lookup_table).to_csv(output_path, index=False)
        return
    raise ValueError(f"Unknown format: {format}")
|